| code (string, 86-54.5k chars) | code_codestyle (int64, 0-371) | style_context (string, 87-49.2k chars) | style_context_codestyle (int64, 0-349) | label (int64, 0-1) |
|---|---|---|---|---|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
    "processing_mgp_str": ["MgpstrProcessor"],
    "tokenization_mgp_str": ["MgpstrTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mgp_str"] = [
        "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MgpstrModel",
        "MgpstrPreTrainedModel",
        "MgpstrForSceneTextRecognition",
    ]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
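For context, the `_LazyModule` registration above swaps the package module for a proxy so that heavy submodules are imported only when one of their names is first accessed. A minimal sketch of the same idea using module-level `__getattr__` (PEP 562); this is an illustration, not the actual `transformers._LazyModule` implementation:

import importlib

_lazy_exports = {"configuration_mgp_str": ["MgpstrConfig"]}  # submodule -> exported names

def __getattr__(name):
    # PEP 562: called only when `name` is not already a module attribute,
    # so the submodule is imported the first time one of its names is requested
    for submodule, exported_names in _lazy_exports.items():
        if name in exported_names:
            module = importlib.import_module(f".{submodule}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")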
| 315
|
"""simple docstring"""
def different_signs(num1: int, num2: int) -> bool:
    """Return True if num1 and num2 have opposite signs.

    >>> different_signs(1, -1)
    True
    >>> different_signs(1, 1)
    False
    >>> different_signs(0, -1)
    True
    """
    return num1 ^ num2 < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
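The XOR test above works because two's-complement integers keep their sign in the most significant bit, so `num1 ^ num2` is negative exactly when the two sign bits differ. A quick check with the function as fixed above:

print(different_signs(3, -7))   # True: signs differ
print(different_signs(-3, -7))  # False: both negative
print(different_signs(0, 5))    # False: zero counts as non-negative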
| 315
| 1
|
"""simple docstring"""
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer


class TFGPT2Tokenizer(tf.keras.layers.Layer):
    """In-graph BPE tokenizer for GPT-2, usable inside a tf.keras model."""

    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        """Create a TFGPT2Tokenizer from an existing GPT2Tokenizer."""
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        """Create a TFGPT2Tokenizer from a pretrained GPT-2 checkpoint."""
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        """Create a TFGPT2Tokenizer from a configuration dict (see get_config)."""
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
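A usage sketch for the layer above; the "gpt2" checkpoint name and network access to download it are assumptions:

tf_tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2")
batch = tf_tokenizer(tf.constant(["hello world"]))
print(batch["input_ids"])  # ragged tensor of token ids (no padding configured here)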
| 186
|
"""simple docstring"""
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 186
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_chinese_clip": [
"CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ChineseCLIPConfig",
"ChineseCLIPOnnxConfig",
"ChineseCLIPTextConfig",
"ChineseCLIPVisionConfig",
],
"processing_chinese_clip": ["ChineseCLIPProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_chinese_clip"] = [
"CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ChineseCLIPModel",
"ChineseCLIPPreTrainedModel",
"ChineseCLIPTextModel",
"ChineseCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 170
|
import math
import sys
def read_file_binary(file_path: str) -> str:
    """Read a binary file and return its contents as a string of bits."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def decompress_data(data_bits: str) -> str:
    """Decompress the given bit string using the LZW algorithm."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"

        if math.log2(index).is_integer():
            # the code length grows by one bit; prefix every existing key with "0"
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result


def write_file_binary(file_path: str, to_write: str) -> None:
    """Pack the bit string into bytes (with a stop-bit pad) and write them out."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def remove_prefix(data_bits: str) -> str:
    """Strip the size header: everything up to and including the first '1' bit."""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits


def compress(source_path: str, destination_path: str) -> None:
    """Read the source file, decompress it, and write the result to destination."""
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
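A usage sketch for the script above; the file names are hypothetical, and the input is expected to come from the matching LZW compressor, whose bit-length header is what remove_prefix strips:

# python lempel_ziv_decompress.py compressed.lz restored.bin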
| 170
| 1
|
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """Utility class for storing learned text embeddings for classifier-free sampling."""

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    """Pipeline for text-to-image generation using VQ Diffusion."""

    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()

        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds
    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive)."
                )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        """
        Truncates log_p_x_0 such that for each column vector the total cumulative probability is truncation_rate.
        The lowest probabilities that would push the cumulative probability above truncation_rate are set to zero.
        """
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()

        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
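A usage sketch for the pipeline above; the checkpoint id is the published VQ-Diffusion release and is an assumption here, as is GPU availability:

from diffusers import VQDiffusionPipeline

pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")  # assumed checkpoint id
image = pipe("teddy bear playing in the pool", num_inference_steps=100).images[0]
image.save("teddy.png")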
| 358
|
'''simple docstring'''
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}
class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
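The depth_coefficient above controls how many times each block repeats when the architecture is scaled up. A simplified sketch of that rounding; round_repeats is an illustrative helper, not the exact transformers implementation:

import math

def round_repeats(repeats: int, depth_coefficient: float) -> int:
    # scale the per-stage repeat count and round up
    return int(math.ceil(depth_coefficient * repeats))

print(round_repeats(3, 3.1))  # 10: the b7-style coefficient turns 3 repeats into 10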
| 83
| 0
|
"""simple docstring"""
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe1_model_id = "CompVis/stable-diffusion-v1-1"
pipe2_model_id = "CompVis/stable-diffusion-v1-2"
pipe3_model_id = "CompVis/stable-diffusion-v1-3"
pipe4_model_id = "CompVis/stable-diffusion-v1-4"
class StableDiffusionComparisonPipeline(DiffusionPipeline):
    """Runs the same prompt through Stable Diffusion v1.1-v1.4 and returns all four images."""

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
        requires_safety_checker: bool = True,
    ):
        super().__init__()

        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
            requires_safety_checker=requires_safety_checker,
        )

        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)

    @property
    def layers(self) -> Dict[str, Any]:
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def text2img_sd1_1(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_2(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_3(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_4(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def __call__(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)

        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")

        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
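A usage sketch; loading this class through the diffusers custom_pipeline mechanism is an assumption about how the file is published as a community pipeline:

# pipe = DiffusionPipeline.from_pretrained(
#     "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
# )
# output = pipe("an astronaut riding a horse on mars")
# output.images  # four images, one per checkpoint v1.1-v1.4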
| 268
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
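To make the dummy past_key_values shape built above concrete, here is the arithmetic with GPT-J's default config values; the batch and sequence sizes are illustrative:

batch, seqlen = 2, 7                         # shape of the dummy input_ids
past_key_values_length = seqlen + 2          # deliberately not equal to seqlen
head_dim = 4096 // 16                        # hidden_size // num_attention_heads = 256
past_shape = (batch, 16, past_key_values_length, head_dim)
print(past_shape)  # (2, 16, 9, 256); one (key, value) pair of this shape per layer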
| 349
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
"processing_speech_to_text": ["Speech2TextProcessor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
"TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSpeech2TextForConditionalGeneration",
"TFSpeech2TextModel",
"TFSpeech2TextPreTrainedModel",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_to_text"] = [
"SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Speech2TextForConditionalGeneration",
"Speech2TextModel",
"Speech2TextPreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 364
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_clap": [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapAudioConfig",
"ClapConfig",
"ClapTextConfig",
],
"processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapModel",
"ClapPreTrainedModel",
"ClapTextModel",
"ClapTextModelWithProjection",
"ClapAudioModel",
"ClapAudioModelWithProjection",
]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 98
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"
class CamembertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
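For reference, the special-token layout the two methods above produce, mirroring the CamemBERT documentation:

# single sequence:   <s> A </s>
# pair of sequences: <s> A </s></s> B </s>
# token type ids are all zeros in both cases; CamemBERT does not use them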
| 275
|
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]

    return out_tensor.tolist()


def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False


@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels as they are not of the same length yet.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
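A minimal demonstration of padding_tensor with right-side padding, assuming the fixed function above:

print(padding_tensor([[1, 2], [3]], -1, "right", 4))
# [[1, 2, -1, -1], [3, -1, -1, -1]]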
| 275
| 1
|
'''simple docstring'''
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)


class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()

        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)

        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
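A usage sketch for the composition classmethod above; the concrete encoder and decoder config classes are only examples:

from transformers import GPT2Config, ViTConfig

config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(ViTConfig(), GPT2Config())
print(config.decoder.is_decoder, config.decoder.add_cross_attention)  # True True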
| 5
|
'''simple docstring'''
import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel's Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = \"{COMET}: A Neural Framework for {MT} Evaluation\",
author = \"Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon\",
booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",
month = nov,
year = \"2020\",
address = \"Online\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",
pages = \"2685--2702\",
}
"""
__lowerCAmelCase = """\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
"""
__lowerCAmelCase = """
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric('comet')
>>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use
>>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]
>>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]
>>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results[\"scores\"]])
[0.19, 0.92]
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class COMET(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://unbabel.github.io/COMET/html/index.html",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "sources": datasets.Value("string", id="sequence"),
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/Unbabel/COMET"],
            reference_urls=[
                "https://github.com/Unbabel/COMET",
                "https://www.aclweb.org/anthology/2020.emnlp-main.213/",
                "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
| 5
| 1
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_versatile_diffusion_text_to_image(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 186
|
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
    "t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
    "t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
    "t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
    "t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}
class T5Config ( PretrainedConfig ):
    """simple docstring"""
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
    def __init__( self , vocab_size=32128 , d_model=512 , d_kv=64 , d_ff=2048 , num_layers=6 , num_decoder_layers=None , num_heads=8 , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , dropout_rate=0.1 , layer_norm_epsilon=1e-6 , initializer_factor=1.0 , feed_forward_proj="relu" , is_encoder_decoder=True , use_cache=True , pad_token_id=0 , eos_token_id=1 , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        act_info = self.feed_forward_proj.split('''-''' )
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == '''gated'''
        if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
            raise ValueError(
                F'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
                '''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
                '''\'gated-gelu\' or \'relu\'''' )
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = '''gelu_new'''
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , **kwargs , )
class T5OnnxConfig ( OnnxSeq2SeqConfigWithPast ):
    """simple docstring"""
    @property
    def inputs ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
        common_inputs = {
            '''input_ids''': {0: '''batch''', 1: '''encoder_sequence'''},
            '''attention_mask''': {0: '''batch''', 1: '''encoder_sequence'''},
        }
        if self.use_past:
            common_inputs['''attention_mask'''][1] = '''past_encoder_sequence + sequence'''
            common_inputs['''decoder_input_ids'''] = {0: '''batch'''}
            common_inputs['''decoder_attention_mask'''] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
        else:
            common_inputs['''decoder_input_ids'''] = {0: '''batch''', 1: '''decoder_sequence'''}
            common_inputs['''decoder_attention_mask'''] = {0: '''batch''', 1: '''decoder_sequence'''}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction='''inputs''' )
        return common_inputs
@property
    def default_onnx_opset ( self ) -> int:
'''simple docstring'''
return 13
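# A hedged export sketch: with this OnnxConfig registered for T5, the transformers
# ONNX CLI of this era could export a checkpoint roughly as follows (the output
# directory name is illustrative):
#
#     python -m transformers.onnx --model=t5-small onnx/t5-small/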
| 186
| 1
|
"""simple docstring"""
from math import ceil
def solution ( n : int = 1_0_0_1 ) -> int:
    '''simple docstring'''
    total = 1
    for i in range(1 , int(ceil(n / 2.0 ) ) ):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
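# Worked example: in a 5 x 5 spiral the diagonals hold 1, 3, 5, 7, 9, 13, 17, 21 and 25,
# which sum to 101, so solution(5) returns 101.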
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
            print(solution(n))
except ValueError:
print('Invalid entry - please enter a number')
| 354
|
"""simple docstring"""
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm ( main_process_only : bool = True , *args , **kwargs ) -> Optional[Any]:
    '''simple docstring'''
    if not is_tqdm_available():
        raise ImportError("""Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.""" )
    disable = False
    if main_process_only:
        disable = PartialState().local_process_index != 0
    return _tqdm(*args , **kwargs , disable=disable )
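# A minimal usage sketch (this wrapper is exposed as `accelerate.utils.tqdm`; the loop
# below is illustrative):
#
#     from accelerate.utils import tqdm
#     for batch in tqdm(dataloader):  # progress bar shown on the main process only
#         ...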
| 1
| 0
|
"""simple docstring"""
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests ( FlaxModelTesterMixin , unittest.TestCase ):
    model_class = FlaxAutoencoderKL
@property
    def dummy_input ( self ):
        """simple docstring"""
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        prng_key = jax.random.PRNGKey(0 )
        image = jax.random.uniform(prng_key , ((batch_size, num_channels) + sizes) )
        return {"sample": image, "prng_key": prng_key}
    def prepare_init_args_and_inputs_for_common ( self ):
        """simple docstring"""
        init_dict = {
            '''block_out_channels''': [32, 64],
            '''in_channels''': 3,
            '''out_channels''': 3,
            '''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
            '''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
            '''latent_channels''': 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 72
|
'''simple docstring'''
def sum_of_digits ( n: int ) -> int:
    n = abs(n )
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res
def sum_of_digits_recursion ( n: int ) -> int:
    n = abs(n )
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10 )
def sum_of_digits_compact ( n: int ) -> int:
    return sum(int(c ) for c in str(abs(n ) ) )
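# Worked example shared by all three implementations: 262144 has digits 2, 6, 2, 1, 4, 4,
# so each function returns 19 for that input.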
def benchmark ( ) -> None:
    from collections.abc import Callable
    from timeit import timeit
    def benchmark_a_function(func: Callable , value: int ) -> None:
        call = f'{func.__name__}({value})'
        timing = timeit(f'__main__.{call}' , setup='import __main__' )
        print(f'{call:56} = {func(value )} -- {timing:.4f} seconds' )
    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func , value )
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 83
| 0
|
"""simple docstring"""
import argparse
import os
import re
snake_case = """src/transformers/models/auto"""
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"""[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict""")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r"""\s*\(\s*\"(\S[^\"]+)\"""")
def sort_auto_mapping ( fname , overwrite = False ):
    """simple docstring"""
    with open(fname , "r" , encoding="utf-8" ) as f:
        content = f.read()
    lines = content.split("\n" )
    new_lines = []
    line_idx = 0
    while line_idx < len(lines ):
        if _re_intro_mapping.search(lines[line_idx] ) is not None:
            indent = len(re.search(R"^(\s*)\S" , lines[line_idx] ).groups()[0] ) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "(" ):
                new_lines.append(lines[line_idx] )
                line_idx += 1
            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")" ):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1] ) )
                else:
                    blocks.append(lines[line_idx] )
                line_idx += 1
            # Sort blocks by their identifiers
            blocks = sorted(blocks , key=lambda x : _re_identifier.search(x ).groups()[0] )
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx] )
            line_idx += 1
    if overwrite:
        with open(fname , "w" , encoding="utf-8" ) as f:
            f.write("\n".join(new_lines ) )
    elif "\n".join(new_lines ) != content:
        return True
def sort_all_auto_mappings ( overwrite = False ):
    """simple docstring"""
    fnames = [os.path.join(PATH_TO_AUTO_MODULE , f ) for f in os.listdir(PATH_TO_AUTO_MODULE ) if f.endswith(".py" )]
    diffs = [sort_auto_mapping(fname , overwrite=overwrite ) for fname in fnames]
    if not overwrite and any(diffs ):
        failures = [f for f, d in zip(fnames , diffs ) if d]
        raise ValueError(
            F'''The following files have auto mappings that need sorting: {', '.join(failures )}. Run `make style` to fix'''
            " this." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
    args = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
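# Typical invocations, assuming the script lives at its usual utils/ location:
#
#     python utils/sort_auto_mappings.py              # sorts the mappings in place
#     python utils/sort_auto_mappings.py --check_only # only reports unsorted mappings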
| 352
|
def perfect_cube ( n ):
    """simple docstring"""
    val = n ** (1 / 3)
    return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
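# Note: floating-point cube roots can mis-round (for example 64 ** (1 / 3) evaluates to
# 3.9999999999999996 on many platforms), so a rounding variant is more robust; the name
# below is illustrative:
#
#     def perfect_cube_rounded(n: int) -> bool:
#         val = round(abs(n) ** (1 / 3))
#         return val * val * val == abs(n)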
| 319
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json',
'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json',
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json',
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json',
'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json',
'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json',
'cl-tohoku/bert-base-japanese-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'
),
'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig ( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''bert'''
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
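# A short usage sketch (standard transformers API; the override below is illustrative):
#
#     from transformers import BertConfig, BertModel
#     config = BertConfig(hidden_dropout_prob=0.2)
#     model = BertModel(config)  # randomly initialised weights with this architecture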
class BertOnnxConfig ( OnnxConfig ):
'''simple docstring'''
@property
    def inputs ( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
| 74
|
"""simple docstring"""
import math
def real_power ( apparent_power: float , power_factor: float ) -> float:
    if (
        not isinstance(power_factor , (int, float) )
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('power_factor must be a valid float value between -1 and 1.' )
    return apparent_power * power_factor
def reactive_power ( apparent_power: float , power_factor: float ) -> float:
    if (
        not isinstance(power_factor , (int, float) )
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('power_factor must be a valid float value between -1 and 1.' )
    return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
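# Worked example: a 100 VA load at power factor 0.9 gives real_power(100, 0.9) == 90.0 W
# and reactive_power(100, 0.9) ~= 43.589 VAR, since sqrt(1 - 0.9 ** 2) ~= 0.43589.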
| 98
| 0
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
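# A typical launch command (illustrative; `--mixed_precision` and `--cpu` are defined by
# this script's own argparse below, while `accelerate launch` comes from the accelerate CLI):
#
#     accelerate launch this_script.py --mixed_precision fp16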
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders( accelerator: Accelerator , batch_size: int = 16 ):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""" )
    datasets = load_dataset("""glue""" , """mrpc""" )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""" , """labels""" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding="""longest""" , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors="""pt""" , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function( config , args ):
    """simple docstring"""
    if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , None ) == "1":
        config["""num_epochs"""] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""] )
    seed = int(config["""seed"""] )
    batch_size = int(config["""batch_size"""] )
    metric = evaluate.load("""glue""" , """mrpc""" )
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=True )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps , )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
# Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions, references = accelerator.gather((predictions, batch["""labels"""]) )
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader ) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                    references = references[: len(eval_dataloader.dataset ) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'''epoch {epoch}:''' , eval_metric )
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser(description="""Simple example of training script.""" )
    parser.add_argument(
        """--mixed_precision""" , type=str , default=None , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
        """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
        """and an Nvidia Ampere GPU.""" , )
    parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
    args = parser.parse_args()
    config = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
    training_function(config , args )
if __name__ == "__main__":
main()
| 115
|
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PREFIX = """https://openaipublic.azureedge.net/jukebox/models/"""
MODEL_MAPPING = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def replace_key( key ) -> str:
    """simple docstring"""
    if key.endswith(""".model.1.bias""" ) and len(key.split(""".""" ) ) > 10:
        key = key.replace(""".model.1.bias""" , """.conv1d_1.bias""" )
    elif key.endswith(""".model.1.weight""" ) and len(key.split(""".""" ) ) > 10:
        key = key.replace(""".model.1.weight""" , """.conv1d_1.weight""" )
    elif key.endswith(""".model.3.bias""" ) and len(key.split(""".""" ) ) > 10:
        key = key.replace(""".model.3.bias""" , """.conv1d_2.bias""" )
    elif key.endswith(""".model.3.weight""" ) and len(key.split(""".""" ) ) > 10:
        key = key.replace(""".model.3.weight""" , """.conv1d_2.weight""" )
    if "conditioner_blocks.0." in key:
        key = key.replace("""conditioner_blocks.0""" , """conditioner_blocks""" )
    if "prime_prior" in key:
        key = key.replace("""prime_prior""" , """encoder""" )
    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(""".emb.""" , """.""" )
    if key.endswith("""k""" ):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(""".k""" , """.codebook""" )
    if "y_emb." in key:
        return key.replace("""y_emb.""" , """metadata_embedding.""" )
    if "x_emb.emb." in key:
        key = key.replace("""0.x_emb.emb""" , """embed_tokens""" )
    if "prime_state_ln" in key:
        return key.replace("""prime_state_ln""" , """encoder.final_layer_norm""" )
    if ".ln" in key:
        return key.replace(""".ln""" , """.layer_norm""" )
    if "_ln" in key:
        return key.replace("""_ln""" , """_layer_norm""" )
    if "prime_state_proj" in key:
        return key.replace("""prime_state_proj""" , """encoder.proj_in""" )
    if "prime_x_out" in key:
        return key.replace("""prime_x_out""" , """encoder.lm_head""" )
    if "prior.x_out" in key:
        return key.replace("""x_out""" , """fc_proj_out""" )
    if "x_emb" in key:
        return key.replace("""x_emb""" , """embed_tokens""" )
    return key
def fix_jukebox_keys( state_dict , model_state_dict , key_prefix , mapping ):
    """simple docstring"""
    new_dict = {}
    import re
    re_encoder_block_conv_in = re.compile(R"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" )
    re_encoder_block_resnet = re.compile(
        R"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
    re_encoder_block_proj_out = re.compile(R"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" )
    re_decoder_block_conv_out = re.compile(R"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" )
    re_decoder_block_resnet = re.compile(
        R"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
    re_decoder_block_proj_in = re.compile(R"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" )
    re_prior_cond_conv_out = re.compile(R"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)""" )
    re_prior_cond_resnet = re.compile(
        R"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
    re_prior_cond_proj_in = re.compile(R"""conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)""" )
    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key ):
            regex_match = re_encoder_block_conv_in.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] )
            re_new_key = f'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'''
            key = re_encoder_block_conv_in.sub(re_new_key , original_key )
        elif re_encoder_block_resnet.fullmatch(original_key ):
            regex_match = re_encoder_block_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] )
            conv_index = {"""1""": 1, """3""": 2}[groups[-2]]
            prefix = f'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'''
            resnet_block = f'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key , original_key )
        elif re_encoder_block_proj_out.fullmatch(original_key ):
            regex_match = re_encoder_block_proj_out.match(original_key )
            groups = regex_match.groups()
            re_new_key = f'''encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'''
            key = re_encoder_block_proj_out.sub(re_new_key , original_key )
        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key ):
            regex_match = re_decoder_block_conv_out.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] ) - 2
            re_new_key = f'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'''
            key = re_decoder_block_conv_out.sub(re_new_key , original_key )
        elif re_decoder_block_resnet.fullmatch(original_key ):
            regex_match = re_decoder_block_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] ) - 2
            conv_index = {"""1""": 1, """3""": 2}[groups[-2]]
            prefix = f'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'''
            resnet_block = f'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key , original_key )
        elif re_decoder_block_proj_in.fullmatch(original_key ):
            regex_match = re_decoder_block_proj_in.match(original_key )
            groups = regex_match.groups()
            re_new_key = f'''decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'''
            key = re_decoder_block_proj_in.sub(re_new_key , original_key )
        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key ):
            regex_match = re_prior_cond_conv_out.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[1] ) * 2 + int(groups[2] ) - 2
            re_new_key = f'''conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'''
            key = re_prior_cond_conv_out.sub(re_new_key , original_key )
        elif re_prior_cond_resnet.fullmatch(original_key ):
            regex_match = re_prior_cond_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[1] ) * 2 + int(groups[2] ) - 2
            conv_index = {"""1""": 1, """3""": 2}[groups[-2]]
            prefix = f'''conditioner_blocks.upsampler.upsample_block.{block_index}.'''
            resnet_block = f'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key , original_key )
        elif re_prior_cond_proj_in.fullmatch(original_key ):
            regex_match = re_prior_cond_proj_in.match(original_key )
            groups = regex_match.groups()
            re_new_key = f'''conditioner_blocks.upsampler.proj_in.{groups[-1]}'''
            key = re_prior_cond_proj_in.sub(re_new_key , original_key )
        # keep original key
        else:
            key = original_key
        key = replace_key(key )
        if f'''{key_prefix}.{key}''' not in model_state_dict or key is None:
            print(f'''failed converting {original_key} to {key}, does not match''' )
        # handle mismatched shape
        elif value.shape != model_state_dict[f'''{key_prefix}.{key}'''].shape:
            val = model_state_dict[f'''{key_prefix}.{key}''']
            print(f'''{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match''' )
            key = original_key
        mapping[key] = original_key
        new_dict[key] = value
    return new_dict
@torch.no_grad()
def convert_openai_checkpoint( model_name=None , pytorch_dump_folder_path=None ):
    """simple docstring"""
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f'''{pytorch_dump_folder_path}/{file.split("/" )[-1]}''' ):
            r = requests.get(f'''{PREFIX}{file}''' , allow_redirects=True )
            os.makedirs(f'''{pytorch_dump_folder_path}/''' , exist_ok=True )
            open(f'''{pytorch_dump_folder_path}/{file.split("/" )[-1]}''' , """wb""" ).write(r.content )
    model_to_convert = MODEL_MAPPING[model_name.split("""/""" )[-1]]
    config = JukeboxConfig.from_pretrained(model_name )
    model = JukeboxModel(config )
    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert ):
        old_dic = torch.load(f'''{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}''' )["""model"""]
        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(""".b""" ):
                new_dic[k.replace("""b""" , """bias""" )] = old_dic[k]
            elif k.endswith(""".w""" ):
                new_dic[k.replace("""w""" , """weight""" )] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(""".blocks.""" , """.model.""" )] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]
        key_prefix = """vqvae""" if i == 0 else f'''priors.{3 - i}'''
        new_dic = fix_jukebox_keys(new_dic , model.state_dict() , key_prefix , mapping )
        weight_dict.append(new_dic )
    vqvae_state_dict = weight_dict.pop(0 )
    model.vqvae.load_state_dict(vqvae_state_dict )
    for i in range(len(weight_dict ) ):
        model.priors[i].load_state_dict(weight_dict[2 - i] )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    with open(f'''{pytorch_dump_folder_path}/mapping.json''' , """w""" ) as txtfile:
        json.dump(mapping , txtfile )
    print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
    args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 115
| 1
|
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)
class VisionEncoderDecoderConfig ( PretrainedConfig ):
    model_type = '''vision-encoder-decoder'''
    is_composition = True
    def __init__(self , **kwargs ):
        super().__init__(**kwargs )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
f"A configuraton of type {self.model_type} cannot be instantiated because "
f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}" )
        encoder_config = kwargs.pop('''encoder''' )
        encoder_model_type = encoder_config.pop('''model_type''' )
        decoder_config = kwargs.pop('''decoder''' )
        decoder_model_type = decoder_config.pop('''model_type''' )
        self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config )
        self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.is_encoder_decoder = True
@classmethod
    def from_encoder_decoder_configs (cls , encoder_config , decoder_config , **kwargs ) -> PretrainedConfig:
        logger.info('''Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **kwargs )
    def to_dict (self ):
        output = copy.deepcopy(self.__dict__ )
        output['''encoder'''] = self.encoder.to_dict()
        output['''decoder'''] = self.decoder.to_dict()
        output['''model_type'''] = self.__class__.model_type
return output
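# A hedged construction sketch (standard transformers usage; the checkpoint pair is illustrative):
#
#     from transformers import AutoConfig, VisionEncoderDecoderConfig
#     encoder_config = AutoConfig.from_pretrained("google/vit-base-patch16-224-in21k")
#     decoder_config = AutoConfig.from_pretrained("bert-base-uncased")
#     config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)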
class VisionEncoderDecoderEncoderOnnxConfig ( OnnxConfig ):
    torch_onnx_minimum_version = version.parse('''1.11''')
@property
    def inputs (self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
    def atol_for_validation (self ) -> float:
return 1e-4
@property
    def outputs (self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict({'''last_hidden_state''': {0: '''batch''', 1: '''encoder_sequence'''}} )
class VisionEncoderDecoderDecoderOnnxConfig ( OnnxConfig ):
@property
    def inputs (self ) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs['''input_ids'''] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
        common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
        common_inputs['''encoder_hidden_states'''] = {0: '''batch''', 1: '''encoder_sequence'''}
return common_inputs
    def generate_dummy_inputs (self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ) -> Mapping[str, Any]:
        import torch
        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        batch, encoder_sequence = dummy_input['''input_ids'''].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs['''input_ids'''] = dummy_input.pop('''input_ids''' )
        common_inputs['''attention_mask'''] = dummy_input.pop('''attention_mask''' )
        common_inputs['''encoder_hidden_states'''] = torch.zeros(encoder_hidden_states_shape )
        return common_inputs
class VisionEncoderDecoderOnnxConfig ( OnnxConfig ):
@property
    def inputs (self ) -> None:
pass
    def get_encoder_config (self , encoder_config ) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config )
    def get_decoder_config (self , encoder_config , decoder_config , feature = "default" ) -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config , feature )
| 5
|
import requests
from bs4 import BeautifulSoup
def world_covid19_stats ( url: str = "https://www.worldometers.info/coronavirus" ) -> dict:
    """simple docstring"""
    soup = BeautifulSoup(requests.get(url ).text , '''html.parser''' )
    keys = soup.findAll('''h1''' )
    values = soup.findAll('''div''' , {'''class''': '''maincounter-number'''} )
    keys += soup.findAll('''span''' , {'''class''': '''panel-title'''} )
    values += soup.findAll('''div''' , {'''class''': '''number-table-main'''} )
    return {key.text.strip(): value.text.strip() for key, value in zip(keys , values )}
if __name__ == "__main__":
print('''\033[1m''' + '''COVID-19 Status of the World''' + '''\033[0m\n''')
    for key, value in world_covid19_stats().items():
print(f'''{key}\n{value}\n''')
| 5
| 1
|
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester :
"""simple docstring"""
    def __init__( self , parent , out_indices=None , stage_names=None , out_features=None , backbone="resnet50" , batch_size=3 , image_size=32 , num_channels=3 , use_pretrained_backbone=True , is_training=True , ):
        """simple docstring"""
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training
    def prepare_config_and_inputs ( self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        config = self.get_config()
        return config, pixel_values
    def get_config ( self ):
"""simple docstring"""
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
    def create_and_check_model ( self , config , pixel_values ):
        """simple docstring"""
        model = TimmBackbone(config=config )
        model.to(torch_device )
        model.eval()
        with torch.no_grad():
            result = model(pixel_values )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
    def prepare_config_and_inputs_for_common ( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest ( ModelTesterMixin , BackboneTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False
    def setUp ( self ):
        """simple docstring"""
        self.model_tester = TimmBackboneModelTester(self )
        self.config_tester = ConfigTester(self , config_class=TimmBackboneConfig , has_text_modality=False )
    def test_config ( self ):
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence ( self ):
        """simple docstring"""
        timm_checkpoint = """resnet18"""
        transformers_checkpoint = """microsoft/resnet-18"""
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint , use_timm_backbone=True )
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint , use_timm_backbone=True , out_indices=[1, 2, 3] )
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip("""TimmBackbone doesn't support feed forward chunking""" )
def SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone doesn't have num_hidden_layers attribute""" )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone initialization is managed on the timm side""" )
def SCREAMING_SNAKE_CASE ( self : int ) -> int:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone model cannot be created without specifying a backbone checkpoint""" )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
"""simple docstring"""
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
"""simple docstring"""
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone doesn't have hidden size info in its configuration.""" )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone doesn't support output_attentions.""" )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
pass
@unittest.skip("""Safetensors is not supported by timm.""" )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
"""simple docstring"""
pass
    def test_forward_signature ( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_retain_grad_hidden_states_attentions ( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions
        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config )
        model.to(torch_device )
        inputs = self._prepare_for_class(inputs_dict , model_class )
        outputs = model(**inputs )
        output = outputs[0][-1]
        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()
        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()
        output.flatten()[0].backward(retain_graph=True )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
    def test_backbone_outputs ( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            result = model(**inputs_dict )
            self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
            self.assertEqual(len(model.channels ) , len(config.out_indices ) )
            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config )
            modified_config.out_indices = None
            model = model_class(modified_config )
            model.to(torch_device )
            model.eval()
            result = model(**inputs_dict )
            self.assertEqual(len(result.feature_maps ) , 1 )
            self.assertEqual(len(model.channels ) , 1 )
            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config )
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config )
            model.to(torch_device )
            model.eval()
            result = model(**inputs_dict )
| 139
|
import numpy as np
import qiskit
def bb84 (key_len: int = 8 , seed: int | None = None ) -> str:
    rng = np.random.default_rng(seed=seed )
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2 , size=num_qubits )
    # The set of states Alice will prepare.
    alice_state = rng.integers(2 , size=num_qubits )
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2 , size=num_qubits )
    # Quantum Circuit to simulate BB84
    bbaa_circ = qiskit.QuantumCircuit(num_qubits , name="""BB84""" )
    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis ):
        if alice_state[index] == 1:
            bbaa_circ.x(index )
        if alice_basis[index] == 1:
            bbaa_circ.h(index )
    bbaa_circ.barrier()
    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis ):
        if bob_basis[index] == 1:
            bbaa_circ.h(index )
    bbaa_circ.barrier()
    bbaa_circ.measure_all()
    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("""aer_simulator""" )
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bbaa_circ , sim , shots=1 , seed_simulator=seed )
    # Returns the result of measurement.
    result = job.result().get_counts(bbaa_circ ).most_frequent()
    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = """""".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(
                alice_basis , bob_basis , result )
            if alice_basis_bit == bob_basis_bit
        ] )
    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key ) >= key_len else gen_key.ljust(key_len , """0""" )
    return key
if __name__ == "__main__":
    print(F'The generated key is : {bb84(8, seed=0)}')
from doctest import testmod
testmod()
| 139
| 1
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_lowercase : Any = logging.get_logger(__name__)
class BlipImageProcessor ( BaseImageProcessor ):
    model_input_names = ['''pixel_values''']
    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BICUBIC , do_rescale = True , rescale_factor = 1 / 255 , do_normalize = True , image_mean = None , image_std = None , do_convert_rgb = True , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        size = size if size is not None else {'''height''': 384, '''width''': 384}
        size = get_size_dict(size , default_to_square=True )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize ( self , image , size , resample = PILImageResampling.BICUBIC , data_format = None , **kwargs , ):
        """simple docstring"""
        size = get_size_dict(size , default_to_square=True )
        if "height" not in size or "width" not in size:
            raise ValueError(F'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''' )
        output_size = (size['''height'''], size['''width'''])
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def rescale ( self , image , scale , data_format = None , **kwargs , ):
        """simple docstring"""
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize ( self , image , mean , std , data_format = None , **kwargs , ):
        """simple docstring"""
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess ( self , images , do_resize = None , size = None , resample = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , do_convert_rgb = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=True )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None or resample is None:
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image ) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        encoded_outputs = BatchFeature(data={'''pixel_values''': images} , tensor_type=return_tensors )
        return encoded_outputs
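# A minimal usage sketch (assuming this class is transformers' BlipImageProcessor; the
# dummy image is illustrative):
#
#     import numpy as np
#     from transformers import BlipImageProcessor
#     processor = BlipImageProcessor()
#     image = np.zeros((480, 640, 3), dtype=np.uint8)
#     inputs = processor(images=image, return_tensors="pt")  # resized to 384 x 384
#     print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 384, 384])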
'''simple docstring'''
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class __A ( UpperCamelCase__ ):
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = 5
# Realm tok
UpperCAmelCase_ = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"test",
"question",
"this",
"is",
"the",
"first",
"second",
"third",
"fourth",
"fifth",
"record",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
UpperCAmelCase_ = os.path.join(self.tmpdirname , "realm_tokenizer" )
os.makedirs(__a , exist_ok=__a )
UpperCAmelCase_ = os.path.join(__a , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
UpperCAmelCase_ = os.path.join(self.tmpdirname , "realm_block_records" )
os.makedirs(__a , exist_ok=__a )
def _lowercase (self : Optional[Any] ):
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , "realm_tokenizer" ) )
def _lowercase (self : Any ):
shutil.rmtree(self.tmpdirname )
def _lowercase (self : List[Any] ):
UpperCAmelCase_ = RealmConfig(num_block_records=self.num_block_records )
return config
def _lowercase (self : List[str] ):
UpperCAmelCase_ = Dataset.from_dict(
{
"id": ["0", "1"],
"question": ["foo", "bar"],
"answers": [["Foo", "Bar"], ["Bar"]],
} )
return dataset
def _lowercase (self : Any ):
UpperCAmelCase_ = np.array(
[
B"This is the first record",
B"This is the second record",
B"This is the third record",
B"This is the fourth record",
B"This is the fifth record",
B"This is a longer longer longer record",
] , dtype=__a , )
return block_records
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def _lowercase (self : int ):
UpperCAmelCase_ = self.get_config()
UpperCAmelCase_ = self.get_dummy_retriever()
UpperCAmelCase_ = retriever.tokenizer
UpperCAmelCase_ = np.array([0, 3] , dtype="long" )
UpperCAmelCase_ = tokenizer(["Test question"] ).input_ids
UpperCAmelCase_ = tokenizer(
["the fourth"] , add_special_tokens=__a , return_token_type_ids=__a , return_attention_mask=__a , ).input_ids
UpperCAmelCase_ = config.reader_seq_len
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = retriever(
__a , __a , answer_ids=__a , max_length=__a , return_tensors="np" )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"] , )
def _lowercase (self : List[Any] ):
UpperCAmelCase_ = self.get_config()
UpperCAmelCase_ = self.get_dummy_retriever()
UpperCAmelCase_ = retriever.tokenizer
UpperCAmelCase_ = np.array([0, 3, 5] , dtype="long" )
UpperCAmelCase_ = tokenizer(["Test question"] ).input_ids
UpperCAmelCase_ = tokenizer(
["the fourth", "longer longer"] , add_special_tokens=__a , return_token_type_ids=__a , return_attention_mask=__a , ).input_ids
UpperCAmelCase_ = config.reader_seq_len
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = retriever(
__a , __a , answer_ids=__a , max_length=__a , return_tensors="np" )
self.assertEqual([False, True, True] , __a )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , __a )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , __a )
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
# Test local path
UpperCAmelCase_ = retriever.from_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
self.assertEqual(retriever.block_records[0] , B"This is the first record" )
# Test mocked remote path
with patch("transformers.models.realm.retrieval_realm.hf_hub_download" ) as mock_hf_hub_download:
UpperCAmelCase_ = os.path.join(
os.path.join(self.tmpdirname , "realm_block_records" ) , _REALM_BLOCK_RECORDS_FILENAME )
UpperCAmelCase_ = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa" )
self.assertEqual(retriever.block_records[0] , B"This is the first record" )
"""simple docstring"""
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class _a :
"""simple docstring"""
def __init__( self : Union[str, Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str]=3 , __UpperCamelCase : Tuple=3_2 , __UpperCamelCase : List[Any]=3 , __UpperCamelCase : List[str]=1_0 , __UpperCamelCase : List[Any]=[8, 1_6, 3_2, 6_4] , __UpperCamelCase : List[Any]=[1, 1, 2, 1] , __UpperCamelCase : List[str]=True , __UpperCamelCase : int=True , __UpperCamelCase : Optional[Any]="relu" , __UpperCamelCase : int=3 , __UpperCamelCase : Dict=None , __UpperCamelCase : Any=["stage2", "stage3", "stage4"] , __UpperCamelCase : Dict=[2, 3, 4] , __UpperCamelCase : Union[str, Any]=1 , )->Optional[Any]:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = embeddings_size
_UpperCAmelCase = hidden_sizes
_UpperCAmelCase = depths
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = hidden_act
_UpperCAmelCase = num_labels
_UpperCAmelCase = scope
_UpperCAmelCase = len(__UpperCamelCase )
_UpperCAmelCase = out_features
_UpperCAmelCase = out_indices
_UpperCAmelCase = num_groups
def lowercase__ ( self : Union[str, Any] )->List[str]:
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self : Optional[int] )->Tuple:
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def lowercase__ ( self : Tuple , __UpperCamelCase : Any , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Tuple )->Union[str, Any]:
_UpperCAmelCase = BitModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def lowercase__ ( self : List[Any] , __UpperCamelCase : int , __UpperCamelCase : Any , __UpperCamelCase : int )->Any:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = BitForImageClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : Optional[int] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Tuple , __UpperCamelCase : List[str] )->Dict:
_UpperCAmelCase = BitBackbone(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_UpperCAmelCase = None
_UpperCAmelCase = BitBackbone(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowercase__ ( self : Any )->Optional[int]:
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _a ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
UpperCamelCase__ = (
{"""feature-extraction""": BitModel, """image-classification""": BitForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def lowercase__ ( self : Any )->List[Any]:
_UpperCAmelCase = BitModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase )
def lowercase__ ( self : Tuple )->Tuple:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase__ ( self : List[Any] )->List[str]:
return
@unittest.skip(reason='''Bit does not output attentions''' )
def lowercase__ ( self : Union[str, Any] )->int:
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
def lowercase__ ( self : Any )->Any:
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
def lowercase__ ( self : Any )->Any:
pass
def lowercase__ ( self : Dict )->List[Any]:
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(__UpperCamelCase )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def lowercase__ ( self : Dict )->List[Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def lowercase__ ( self : List[str] )->List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__UpperCamelCase )
def lowercase__ ( self : Any )->Optional[int]:
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(config=__UpperCamelCase )
for name, module in model.named_modules():
if isinstance(__UpperCamelCase , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
def lowercase__ ( self : Tuple )->Dict:
def check_hidden_states_output(__UpperCamelCase : Tuple , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[Any] ):
_UpperCAmelCase = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
_UpperCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_UpperCAmelCase = self.model_tester.num_stages
self.assertEqual(len(__UpperCamelCase ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = ['''preactivation''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_UpperCAmelCase = layer_type
_UpperCAmelCase = True
check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
def lowercase__ ( self : int )->Union[str, Any]:
pass
def lowercase__ ( self : List[str] )->Dict:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase )
@slow
def lowercase__ ( self : Optional[Any] )->Union[str, Any]:
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = BitModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def lowercase ( ):
'''simple docstring'''
_UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class _a ( unittest.TestCase):
"""simple docstring"""
@cached_property
def lowercase__ ( self : Optional[int] )->Any:
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def lowercase__ ( self : Optional[Any] )->Union[str, Any]:
_UpperCAmelCase = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__UpperCamelCase )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=__UpperCamelCase , return_tensors='''pt''' ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(**__UpperCamelCase )
# verify the logits
_UpperCAmelCase = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
_UpperCAmelCase = torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1e-4 ) )
@require_torch
class _a ( lowerCAmelCase , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = (BitBackbone,) if is_torch_available() else ()
UpperCamelCase__ = BitConfig
UpperCamelCase__ = False
def lowercase__ ( self : int )->Optional[int]:
_UpperCAmelCase = BitModelTester(self )
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
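# Hedged illustration (added): with the ``_LazyModule`` registration above,
# importing the package is cheap -- the torch-dependent ``modeling_mmbt``
# submodule is only imported when one of its attributes is first accessed:
#
#   from transformers.models.mmbt import MMBTConfig  # pulls in configuration_mmbt only
#   from transformers.models.mmbt import MMBTModel   # now modeling_mmbt loads (needs torch)
#
# (The import path is an assumption; MMBT has since moved under
# ``transformers.models.deprecated`` in newer releases.)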
import random
def rabin_miller(num: int) -> bool:
    """Probabilistic Miller-Rabin primality test with 5 random witnesses."""
    # Write num - 1 as 2**t * s with s odd.
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
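# Worked example (added for illustration) of the test above: for num = 13 we
# decompose num - 1 = 12 = 2**2 * 3, so s = 3 and t = 2. With witness a = 2:
# v = 2**3 % 13 = 8 != 1, then v = 8**2 % 13 = 12 == num - 1, so 13 survives
# this witness and is reported as a probable prime. A composite such as 15
# (s = 7, t = 1) is rejected by witness a = 2: v = 2**7 % 15 = 8, and since
# i == t - 1 on the first iteration, the function returns False.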
def is_prime_low_num(num: int) -> bool:
    """Fast primality check: trial division by small primes, then Rabin-Miller."""
    if num < 2:
        return False
    low_primes = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
101,
103,
107,
109,
113,
127,
131,
137,
139,
149,
151,
157,
163,
167,
173,
179,
181,
191,
193,
197,
199,
211,
223,
227,
229,
233,
239,
241,
251,
257,
263,
269,
271,
277,
281,
283,
293,
307,
311,
313,
317,
331,
337,
347,
349,
353,
359,
367,
373,
379,
383,
389,
397,
401,
409,
419,
421,
431,
433,
439,
443,
449,
457,
461,
463,
467,
479,
487,
491,
499,
503,
509,
521,
523,
541,
547,
557,
563,
569,
571,
577,
587,
593,
599,
601,
607,
613,
617,
619,
631,
641,
643,
647,
653,
659,
661,
673,
677,
683,
691,
701,
709,
719,
727,
733,
739,
743,
751,
757,
761,
769,
773,
787,
797,
809,
811,
821,
823,
827,
829,
839,
853,
857,
859,
863,
877,
881,
883,
887,
907,
911,
919,
929,
937,
941,
947,
953,
967,
971,
977,
983,
991,
997,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
    return rabin_miller(num)
def generate_large_prime(keysize: int = 1024) -> int:
    """Generate a random prime with roughly ``keysize`` bits."""
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num
if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
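    # Hedged extra demo (added): smaller key sizes generate much faster and are
    # convenient for experiments, though far too small for real cryptography.
    small_prime = generate_large_prime(keysize=128)
    print(("128-bit prime:", small_prime, "is_prime_low_num:", is_prime_low_num(small_prime)))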
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Union[str, Any] ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def A__ ( self: List[str] ) -> Dict:
UpperCAmelCase_ : Union[str, Any] = 1
UpperCAmelCase_ : Tuple = 3
UpperCAmelCase_ : Optional[Any] = (32, 32)
UpperCAmelCase_ : Optional[int] = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(lowerCamelCase_ )
return image
@property
def A__ ( self: List[Any] ) -> Optional[Any]:
torch.manual_seed(0 )
UpperCAmelCase_ : int = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,)
return model
@property
def A__ ( self: str ) -> List[str]:
torch.manual_seed(0 )
UpperCAmelCase_ : Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,)
return model
@property
def A__ ( self: Optional[int] ) -> int:
torch.manual_seed(0 )
UpperCAmelCase_ : Dict = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
return CLIPTextModel(lowerCamelCase_ )
@property
def A__ ( self: Tuple ) -> Tuple:
def extract(*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: str ):
class _snake_case :
'''simple docstring'''
def __init__( self: List[Any] ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = torch.ones([0] )
def A__ ( self: List[Any] ,lowerCamelCase_: str ) -> int:
self.pixel_values.to(lowerCamelCase_ )
return self
return Out()
return extract
def A__ ( self: Union[str, Any] ) -> Tuple:
UpperCAmelCase_ : int = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : int = self.dummy_cond_unet
UpperCAmelCase_ : Optional[Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule="""scaled_linear""" ,clip_sample=lowerCamelCase_ ,set_alpha_to_one=lowerCamelCase_ ,)
UpperCAmelCase_ : str = self.dummy_vae
UpperCAmelCase_ : List[str] = self.dummy_text_encoder
UpperCAmelCase_ : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
UpperCAmelCase_ : str = StableDiffusionPipeline(
unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,)
UpperCAmelCase_ : List[str] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : List[str] = """A painting of a squirrel eating a burger"""
UpperCAmelCase_ : str = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCAmelCase_ : int = sd_pipe([prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" )
UpperCAmelCase_ : List[Any] = output.images
UpperCAmelCase_ : str = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCAmelCase_ : Dict = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=lowerCamelCase_ ,)[0]
UpperCAmelCase_ : int = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ : Tuple = np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self: Optional[Any] ) -> Any:
UpperCAmelCase_ : Tuple = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Dict = self.dummy_cond_unet
UpperCAmelCase_ : List[Any] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
UpperCAmelCase_ : str = self.dummy_vae
UpperCAmelCase_ : Union[str, Any] = self.dummy_text_encoder
UpperCAmelCase_ : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
UpperCAmelCase_ : Any = StableDiffusionPipeline(
unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,)
UpperCAmelCase_ : int = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = """A painting of a squirrel eating a burger"""
UpperCAmelCase_ : Optional[Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = sd_pipe([prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" )
UpperCAmelCase_ : str = output.images
UpperCAmelCase_ : Union[str, Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCAmelCase_ : int = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=lowerCamelCase_ ,)[0]
UpperCAmelCase_ : Dict = image[0, -3:, -3:, -1]
UpperCAmelCase_ : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ : Tuple = np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self: str ) -> Dict:
UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""" ,safety_checker=lowerCamelCase_ )
assert isinstance(lowerCamelCase_ ,lowerCamelCase_ )
assert isinstance(pipe.scheduler ,lowerCamelCase_ )
assert pipe.safety_checker is None
UpperCAmelCase_ : List[Any] = pipe("""example prompt""" ,num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase_ )
UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
UpperCAmelCase_ : Optional[int] = pipe("""example prompt""" ,num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""" ,"""This test requires a GPU""" )
def A__ ( self: List[str] ) -> Any:
UpperCAmelCase_ : Tuple = self.dummy_cond_unet
UpperCAmelCase_ : Dict = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = self.dummy_vae
UpperCAmelCase_ : List[str] = self.dummy_text_encoder
UpperCAmelCase_ : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# put models in fp16
UpperCAmelCase_ : Optional[Any] = unet.half()
UpperCAmelCase_ : Optional[int] = vae.half()
UpperCAmelCase_ : int = bert.half()
# make sure here that pndm scheduler skips prk
UpperCAmelCase_ : Any = StableDiffusionPipeline(
unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,)
UpperCAmelCase_ : List[Any] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : Tuple = """A painting of a squirrel eating a burger"""
UpperCAmelCase_ : Optional[int] = sd_pipe([prompt] ,num_inference_steps=2 ,output_type="""np""" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Optional[int] ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self: List[str] ) -> List[Any]:
UpperCAmelCase_ : Tuple = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
UpperCAmelCase_ : str = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : str = (
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
UpperCAmelCase_ : Optional[int] = 4003660346
UpperCAmelCase_ : int = 7
# without safety guidance (sld_guidance_scale = 0)
UpperCAmelCase_ : Dict = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
UpperCAmelCase_ : Optional[int] = output.images
UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Dict = [0.2_2_7_8, 0.2_2_3_1, 0.2_2_4_9, 0.2_3_3_3, 0.2_3_0_3, 0.1_8_8_5, 0.2_2_7_3, 0.2_1_4_4, 0.2_1_7_6]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
# without safety guidance (strong configuration)
UpperCAmelCase_ : Union[str, Any] = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : Any = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_2_5 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
UpperCAmelCase_ : Tuple = output.images
UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : str = [0.2_3_8_3, 0.2_2_7_6, 0.2_3_6, 0.2_1_9_2, 0.2_1_8_6, 0.2_0_5_3, 0.1_9_7_1, 0.1_9_0_1, 0.1_7_1_9]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self: Optional[int] ) -> Any:
UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=lowerCamelCase_ )
UpperCAmelCase_ : Any = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
UpperCAmelCase_ : str = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : Any = """padme amidala taking a bath artwork, safe for work, no nudity"""
UpperCAmelCase_ : List[Any] = 2734971755
UpperCAmelCase_ : Optional[Any] = 7
UpperCAmelCase_ : int = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
UpperCAmelCase_ : Dict = output.images
UpperCAmelCase_ : Tuple = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Optional[Any] = [0.3_5_0_2, 0.3_6_2_2, 0.3_3_9_6, 0.3_6_4_2, 0.3_4_7_8, 0.3_3_1_8, 0.3_5, 0.3_3_4_8, 0.3_2_9_7]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
UpperCAmelCase_ : Any = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : Tuple = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_2_5 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
UpperCAmelCase_ : Dict = output.images
UpperCAmelCase_ : List[Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Tuple = [0.5_5_3_1, 0.5_2_0_6, 0.4_8_9_5, 0.5_1_5_6, 0.5_1_8_2, 0.4_7_5_1, 0.4_8_0_2, 0.4_8_0_3, 0.4_4_4_3]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self: Union[str, Any] ) -> int:
UpperCAmelCase_ : List[Any] = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
UpperCAmelCase_ : List[str] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : Any = (
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
UpperCAmelCase_ : Optional[Any] = 1044355234
UpperCAmelCase_ : List[str] = 12
UpperCAmelCase_ : List[Any] = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
UpperCAmelCase_ : Any = output.images
UpperCAmelCase_ : Dict = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Optional[Any] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
UpperCAmelCase_ : Optional[int] = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_2_5 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
UpperCAmelCase_ : List[str] = output.images
UpperCAmelCase_ : Any = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Any = np.array([0.5_8_1_8, 0.6_2_8_5, 0.6_8_3_5, 0.6_0_1_9, 0.6_2_5, 0.6_7_5_4, 0.6_0_9_6, 0.6_3_3_4, 0.6_5_6_1] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["vqvae"]
def __init__( self : int , lowerCAmelCase_ : AutoencoderKL , lowerCAmelCase_ : UNetaDConditionModel , lowerCAmelCase_ : Mel , lowerCAmelCase_ : Union[DDIMScheduler, DDPMScheduler] , ) -> str:
"""simple docstring"""
super().__init__()
self.register_modules(unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , mel=lowerCAmelCase_ , vqvae=lowerCAmelCase_ )
    def get_default_steps(self) -> int:
        """Return the default number of inference steps for the configured scheduler."""
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000
@torch.no_grad()
def __call__( self : Optional[Any] , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : str = None , lowerCAmelCase_ : np.ndarray = None , lowerCAmelCase_ : int = 0 , lowerCAmelCase_ : int = 0 , lowerCAmelCase_ : int = None , lowerCAmelCase_ : torch.Generator = None , lowerCAmelCase_ : float = 0 , lowerCAmelCase_ : float = 0 , lowerCAmelCase_ : torch.Generator = None , lowerCAmelCase_ : float = 0 , lowerCAmelCase_ : torch.Tensor = None , lowerCAmelCase_ : torch.Tensor = None , lowerCAmelCase_ : Any=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
"""simple docstring"""
_a = steps or self.get_default_steps()
self.scheduler.set_timesteps(lowerCAmelCase_ )
_a = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
_a = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
_a = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=lowerCAmelCase_ , device=self.device , )
_a = noise
_a = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(lowerCAmelCase_ , lowerCAmelCase_ )
_a = self.mel.audio_slice_to_image(lowerCAmelCase_ )
_a = np.frombuffer(input_image.tobytes() , dtype='''uint8''' ).reshape(
(input_image.height, input_image.width) )
_a = (input_image / 2_55) * 2 - 1
_a = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
_a = self.vqvae.encode(torch.unsqueeze(lowerCAmelCase_ , 0 ) ).latent_dist.sample(
generator=lowerCAmelCase_ )[0]
_a = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_a = self.scheduler.add_noise(lowerCAmelCase_ , lowerCAmelCase_ , self.scheduler.timesteps[start_step - 1] )
_a = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_a = int(mask_start_secs * pixels_per_second )
_a = int(mask_end_secs * pixels_per_second )
_a = self.scheduler.add_noise(lowerCAmelCase_ , lowerCAmelCase_ , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , lowerCAmelCase_ ):
_a = self.unet(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )['''sample''']
else:
_a = self.unet(lowerCAmelCase_ , lowerCAmelCase_ )['''sample''']
if isinstance(self.scheduler , lowerCAmelCase_ ):
_a = self.scheduler.step(
model_output=lowerCAmelCase_ , timestep=lowerCAmelCase_ , sample=lowerCAmelCase_ , eta=lowerCAmelCase_ , generator=lowerCAmelCase_ , )['''prev_sample''']
else:
_a = self.scheduler.step(
model_output=lowerCAmelCase_ , timestep=lowerCAmelCase_ , sample=lowerCAmelCase_ , generator=lowerCAmelCase_ , )['''prev_sample''']
            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]
        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
        )

        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)
        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))
@torch.no_grad()
def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : List[Image.Image] , lowerCAmelCase_ : int = 50 ) -> np.ndarray:
"""simple docstring"""
assert isinstance(self.scheduler , lowerCAmelCase_ )
self.scheduler.set_timesteps(lowerCAmelCase_ )
_a = np.array(
[np.frombuffer(image.tobytes() , dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] )
_a = (sample / 2_55) * 2 - 1
_a = torch.Tensor(lowerCAmelCase_ ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
_a = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
_a = self.scheduler.alphas_cumprod[t]
_a = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
_a = 1 - alpha_prod_t
_a = self.unet(lowerCAmelCase_ , lowerCAmelCase_ )['''sample''']
_a = (1 - alpha_prod_t_prev) ** 0.5 * model_output
_a = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
_a = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical linear interpolation between two tensors."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
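# Hedged note (added for illustration): ``slerp`` interpolates along the great
# circle between the two flattened tensors, with theta = arccos(<x0, x1> /
# (|x0| * |x1|)). Unlike a straight lerp, the interpolant keeps a roughly
# constant norm, which is why it is preferred for blending Gaussian noise:
#
#   x0, x1 = torch.randn(1, 1, 256, 256), torch.randn(1, 1, 256, 256)
#   halfway = AudioDiffusionPipeline.slerp(x0, x1, 0.5)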
'''simple docstring'''
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    """
    BigBird QA module with an extra 5-way classification head on top, kept
    weight-compatible with FlaxBigBirdForQuestionAnswering.
    """

    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels):
    """Average the start-position, end-position and category cross-entropy losses."""

    def cross_entropy(logits, labels, reduction=None):
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooled_labels)
    return (start_loss + end_loss + pooled_loss) / 3
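# Hedged illustration (added, not part of the original script): a minimal
# standalone equivalent of the one-hot cross-entropy above, indexing the
# log-probabilities with the integer labels instead of materializing the
# full one-hot matrix. The helper name is hypothetical.
def _cross_entropy_from_int_labels(logits, labels):
    log_probs = jax.nn.log_softmax(logits, axis=-1)
    return -jnp.take_along_axis(log_probs, labels[..., None], axis=-1)[..., 0]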
@dataclass
class Args:
lowercase_ = "google/bigbird-roberta-base"
lowercase_ = 3000
lowercase_ = 1_0500
lowercase_ = 128
lowercase_ = 3
lowercase_ = 1
lowercase_ = 5
# tx_args
lowercase_ = 3e-5
lowercase_ = 0.0
lowercase_ = 2_0000
lowercase_ = 0.0095
lowercase_ = "bigbird-roberta-natural-questions"
lowercase_ = "training-expt"
lowercase_ = "data/nq-training.jsonl"
lowercase_ = "data/nq-validation.jsonl"
def __lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
os.makedirs(self.base_dir , exist_ok=lowerCAmelCase_ )
_a = os.path.join(self.base_dir , self.save_dir )
_a = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs
def __call__( self : str , lowerCAmelCase_ : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
_a = self.collate_fn(lowerCAmelCase_ )
_a = jax.tree_util.tree_map(lowerCAmelCase_ , lowerCAmelCase_ )
return batch
def __lowerCAmelCase ( self : List[Any] , lowerCAmelCase_ : List[Any] ) -> int:
"""simple docstring"""
_a , _a = self.fetch_inputs(features['''input_ids'''] )
_a = {
'''input_ids''': jnp.array(lowerCAmelCase_ , dtype=jnp.intaa ),
'''attention_mask''': jnp.array(lowerCAmelCase_ , dtype=jnp.intaa ),
'''start_labels''': jnp.array(features['''start_token'''] , dtype=jnp.intaa ),
'''end_labels''': jnp.array(features['''end_token'''] , dtype=jnp.intaa ),
'''pooled_labels''': jnp.array(features['''category'''] , dtype=jnp.intaa ),
}
return batch
def __lowerCAmelCase ( self : Dict , lowerCAmelCase_ : list ) -> List[Any]:
"""simple docstring"""
_a = [self._fetch_inputs(lowerCAmelCase_ ) for ids in input_ids]
return zip(*lowerCAmelCase_ )
def __lowerCAmelCase ( self : Dict , lowerCAmelCase_ : list ) -> str:
"""simple docstring"""
_a = [1 for _ in range(len(lowerCAmelCase_ ) )]
while len(lowerCAmelCase_ ) < self.max_length:
input_ids.append(self.pad_id )
attention_mask.append(0 )
return input_ids, attention_mask
def snake_case_ (UpperCamelCase : List[Any] , UpperCamelCase : List[Any] , UpperCamelCase : Dict=None ):
'''simple docstring'''
if seed is not None:
_a = dataset.shuffle(seed=UpperCamelCase )
for i in range(len(UpperCamelCase ) // batch_size ):
_a = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(UpperCamelCase )
@partial(jax.pmap , axis_name='''batch''' )
def snake_case_ (UpperCamelCase : Optional[int] , UpperCamelCase : Tuple , **UpperCamelCase : str ):
'''simple docstring'''
def loss_fn(UpperCamelCase : List[str] ):
_a = model_inputs.pop('''start_labels''' )
_a = model_inputs.pop('''end_labels''' )
_a = model_inputs.pop('''pooled_labels''' )
_a = state.apply_fn(**UpperCamelCase , params=UpperCamelCase , dropout_rng=UpperCamelCase , train=UpperCamelCase )
_a , _a , _a = outputs
return state.loss_fn(
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , )
_a , _a = jax.random.split(UpperCamelCase )
_a = jax.value_and_grad(UpperCamelCase )
_a , _a = grad_fn(state.params )
_a = jax.lax.pmean({'''loss''': loss} , axis_name='''batch''' )
_a = jax.lax.pmean(UpperCamelCase , '''batch''' )
_a = state.apply_gradients(grads=UpperCamelCase )
return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name='''batch''' )
def snake_case_ (UpperCamelCase : Any , **UpperCamelCase : Optional[int] ):
'''simple docstring'''
_a = model_inputs.pop('''start_labels''' )
_a = model_inputs.pop('''end_labels''' )
_a = model_inputs.pop('''pooled_labels''' )
_a = state.apply_fn(**UpperCamelCase , params=state.params , train=UpperCamelCase )
_a , _a , _a = outputs
_a = state.loss_fn(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
_a = jax.lax.pmean({'''loss''': loss} , axis_name='''batch''' )
return metrics
class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)
@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None
def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int]=None ) -> List[str]:
"""simple docstring"""
_a = model.params
_a = TrainState.create(
apply_fn=model.__call__ , params=lowerCAmelCase_ , tx=lowerCAmelCase_ , loss_fn=lowerCAmelCase_ , )
if ckpt_dir is not None:
_a , _a , _a , _a , _a = restore_checkpoint(lowerCAmelCase_ , lowerCAmelCase_ )
_a = {
'''lr''': args.lr,
'''init_lr''': args.init_lr,
'''warmup_steps''': args.warmup_steps,
'''num_train_steps''': num_train_steps,
'''weight_decay''': args.weight_decay,
}
_a , _a = build_tx(**lowerCAmelCase_ )
_a = train_state.TrainState(
step=lowerCAmelCase_ , apply_fn=model.__call__ , params=lowerCAmelCase_ , tx=lowerCAmelCase_ , opt_state=lowerCAmelCase_ , )
_a = args
_a = data_collator
_a = lr
_a = params
_a = jax_utils.replicate(lowerCAmelCase_ )
return state
def __lowerCAmelCase ( self : str , lowerCAmelCase_ : Any , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] ) -> int:
"""simple docstring"""
_a = self.args
_a = len(lowerCAmelCase_ ) // args.batch_size
_a = jax.random.PRNGKey(0 )
_a = jax.random.split(lowerCAmelCase_ , jax.device_count() )
for epoch in range(args.max_epochs ):
_a = jnp.array(0 , dtype=jnp.floataa )
_a = get_batched_dataset(lowerCAmelCase_ , args.batch_size , seed=lowerCAmelCase_ )
_a = 0
for batch in tqdm(lowerCAmelCase_ , total=lowerCAmelCase_ , desc=F'Running EPOCH-{epoch}' ):
_a = self.data_collator(lowerCAmelCase_ )
_a , _a , _a = self.train_step_fn(lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ )
running_loss += jax_utils.unreplicate(metrics['''loss'''] )
i += 1
if i % args.logging_steps == 0:
_a = jax_utils.unreplicate(state.step )
_a = running_loss.item() / i
_a = self.scheduler_fn(state_step - 1 )
_a = self.evaluate(lowerCAmelCase_ , lowerCAmelCase_ )
_a = {
'''step''': state_step.item(),
'''eval_loss''': eval_loss.item(),
'''tr_loss''': tr_loss,
'''lr''': lr.item(),
}
tqdm.write(str(lowerCAmelCase_ ) )
self.logger.log(lowerCAmelCase_ , commit=lowerCAmelCase_ )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + F'-e{epoch}-s{i}' , state=lowerCAmelCase_ )
def __lowerCAmelCase ( self : List[str] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str ) -> List[str]:
"""simple docstring"""
_a = get_batched_dataset(lowerCAmelCase_ , self.args.batch_size )
_a = len(lowerCAmelCase_ ) // self.args.batch_size
_a = jnp.array(0 , dtype=jnp.floataa )
_a = 0
for batch in tqdm(lowerCAmelCase_ , total=lowerCAmelCase_ , desc='''Evaluating ... ''' ):
_a = self.data_collator(lowerCAmelCase_ )
_a = self.val_step_fn(lowerCAmelCase_ , **lowerCAmelCase_ )
running_loss += jax_utils.unreplicate(metrics['''loss'''] )
i += 1
return running_loss / i
def __lowerCAmelCase ( self : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : str ) -> int:
"""simple docstring"""
_a = jax_utils.unreplicate(lowerCAmelCase_ )
print(F'SAVING CHECKPOINT IN {save_dir}' , end=''' ... ''' )
self.model_save_fn(lowerCAmelCase_ , params=state.params )
with open(os.path.join(lowerCAmelCase_ , '''opt_state.msgpack''' ) , '''wb''' ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args , os.path.join(lowerCAmelCase_ , '''args.joblib''' ) )
joblib.dump(self.data_collator , os.path.join(lowerCAmelCase_ , '''data_collator.joblib''' ) )
with open(os.path.join(lowerCAmelCase_ , '''training_state.json''' ) , '''w''' ) as f:
json.dump({'''step''': state.step.item()} , lowerCAmelCase_ )
print('''DONE''' )
def snake_case_ (UpperCamelCase : int , UpperCamelCase : Dict ):
'''simple docstring'''
print(f'RESTORING CHECKPOINT FROM {save_dir}' , end=''' ... ''' )
with open(os.path.join(UpperCamelCase , '''flax_model.msgpack''' ) , '''rb''' ) as f:
_a = from_bytes(state.params , f.read() )
with open(os.path.join(UpperCamelCase , '''opt_state.msgpack''' ) , '''rb''' ) as f:
_a = from_bytes(state.opt_state , f.read() )
_a = joblib.load(os.path.join(UpperCamelCase , '''args.joblib''' ) )
_a = joblib.load(os.path.join(UpperCamelCase , '''data_collator.joblib''' ) )
with open(os.path.join(UpperCamelCase , '''training_state.json''' ) , '''r''' ) as f:
_a = json.load(UpperCamelCase )
_a = training_state['''step''']
print('''DONE''' )
return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    """Linear warmup from init_lr to lr, then linear decay toward ~0."""
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr


def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    """Build the AdamW optimizer (with weight-decay masking) and its LR schedule."""

    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        mask = {k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
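# Hedged sketch (added): the schedule built above ramps linearly from
# ``init_lr`` to ``lr`` over ``warmup_steps`` and then decays linearly toward
# ~0 (1e-7) for the remaining steps; ``optax.join_schedules`` switches between
# the two pieces at the warmup boundary. The numbers below are illustrative:
#
#   sched = scheduler_fn(lr=3e-5, init_lr=0.0, warmup_steps=2_000, num_train_steps=20_000)
#   sched(0), sched(2_000), sched(20_000)  # -> 0.0, 3e-5, ~1e-7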
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowerCamelCase__ ( A , unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@property
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = ort.SessionOptions()
__UpperCAmelCase : Tuple = False
return options
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
__UpperCAmelCase : str = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
__UpperCAmelCase : str = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
__UpperCAmelCase : Any = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , safety_checker=UpperCamelCase , feature_extractor=UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=UpperCamelCase )
__UpperCAmelCase : Optional[Any] = """A red cat sitting on a park bench"""
__UpperCAmelCase : List[str] = np.random.RandomState(0 )
__UpperCAmelCase : List[str] = pipe(
prompt=UpperCamelCase , image=UpperCamelCase , mask_image=UpperCamelCase , guidance_scale=7.5 , num_inference_steps=10 , generator=UpperCamelCase , output_type="""np""" , )
__UpperCAmelCase : int = output.images
__UpperCAmelCase : int = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
__UpperCAmelCase : List[str] = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
__UpperCAmelCase : Dict = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
__UpperCAmelCase : List[str] = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , subfolder="""scheduler""" , revision="""onnx""" )
__UpperCAmelCase : List[str] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , scheduler=UpperCamelCase , safety_checker=UpperCamelCase , feature_extractor=UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=UpperCamelCase )
__UpperCAmelCase : Dict = """A red cat sitting on a park bench"""
__UpperCAmelCase : List[str] = np.random.RandomState(0 )
__UpperCAmelCase : Optional[int] = pipe(
prompt=UpperCamelCase , image=UpperCamelCase , mask_image=UpperCamelCase , guidance_scale=7.5 , num_inference_steps=20 , generator=UpperCamelCase , output_type="""np""" , )
__UpperCAmelCase : Optional[int] = output.images
__UpperCAmelCase : List[Any] = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
__UpperCAmelCase : Optional[Any] = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
"""simple docstring"""
import numpy as np
SQUARE = [
['a', 'b', 'c', 'd', 'e'],
['f', 'g', 'h', 'i', 'k'],
['l', 'm', 'n', 'o', 'p'],
['q', 'r', 's', 't', 'u'],
['v', 'w', 'x', 'y', 'z'],
]
class lowerCamelCase__ :
"""simple docstring"""
def __init__( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = np.array(UpperCamelCase )
def lowerCamelCase__ ( self : int , UpperCamelCase : str ):
'''simple docstring'''
__UpperCAmelCase ,__UpperCAmelCase : Optional[Any] = np.where(letter == self.SQUARE )
__UpperCAmelCase : Optional[Any] = np.concatenate([indexa + 1, indexa + 1] )
return indexes
def lowerCamelCase__ ( self : Any , UpperCamelCase : int , UpperCamelCase : int ):
'''simple docstring'''
__UpperCAmelCase : Tuple = self.SQUARE[indexa - 1, indexa - 1]
return letter
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : str ):
'''simple docstring'''
__UpperCAmelCase : str = message.lower()
__UpperCAmelCase : List[Any] = message.replace(""" """ , """""" )
__UpperCAmelCase : List[Any] = message.replace("""j""" , """i""" )
__UpperCAmelCase : Optional[int] = np.empty((2, len(UpperCamelCase )) )
for letter_index in range(len(UpperCamelCase ) ):
__UpperCAmelCase : List[Any] = self.letter_to_numbers(message[letter_index] )
__UpperCAmelCase : str = numbers[0]
__UpperCAmelCase : int = numbers[1]
__UpperCAmelCase : Union[str, Any] = first_step.reshape(2 * len(UpperCamelCase ) )
__UpperCAmelCase : Optional[Any] = """"""
for numbers_index in range(len(UpperCamelCase ) ):
__UpperCAmelCase : Any = int(second_step[numbers_index * 2] )
__UpperCAmelCase : Any = int(second_step[(numbers_index * 2) + 1] )
__UpperCAmelCase : str = self.numbers_to_letter(UpperCamelCase , UpperCamelCase )
__UpperCAmelCase : Optional[Any] = encoded_message + letter
return encoded_message
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : str ):
'''simple docstring'''
__UpperCAmelCase : Any = message.lower()
message.replace(""" """ , """""" )
__UpperCAmelCase : int = np.empty(2 * len(UpperCamelCase ) )
for letter_index in range(len(UpperCamelCase ) ):
__UpperCAmelCase : Any = self.letter_to_numbers(message[letter_index] )
__UpperCAmelCase : Any = numbers[0]
__UpperCAmelCase : Dict = numbers[1]
__UpperCAmelCase : str = first_step.reshape((2, len(UpperCamelCase )) )
__UpperCAmelCase : Union[str, Any] = """"""
for numbers_index in range(len(UpperCamelCase ) ):
__UpperCAmelCase : Optional[int] = int(second_step[0, numbers_index] )
__UpperCAmelCase : Tuple = int(second_step[1, numbers_index] )
__UpperCAmelCase : Tuple = self.numbers_to_letter(UpperCamelCase , UpperCamelCase )
__UpperCAmelCase : Any = decoded_message + letter
return decoded_message
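
# Illustrative round trip (not part of the original module); a minimal sketch
# assuming the BifidCipher class above:
#
#     cipher = BifidCipher()
#     encoded = cipher.encode("testmessage")
#     assert cipher.decode(encoded) == "testmessage"
#
# Note that "j" is folded into "i" by encode(), so messages containing "j"
# round-trip with "i" in its place.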
| 115
| 1
|
import logging
import os
import threading
import time
try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None


# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError


# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"


_logger = None


def logger():
    """Returns the logger instance used in this module."""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        # The path of the file lock.
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy:
    """A context-manager proxy returned by ``BaseFileLock.acquire()``."""

    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value.
        self.timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        # Platform-dependent: actually try to take the lock.
        raise NotImplementedError()

    def _release(self):
        # Platform-dependent: actually release the lock.
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None

    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    """Uses the :func:`msvcrt.locking` function to hard lock the lock file on Windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    """Uses the :func:`fcntl.flock` function to hard lock the lock file on Unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        # Do not remove the lockfile:
        #
        #   https://github.com/benediktschmitt/py-filelock/issues/31
        #   https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
# Select the platform-appropriate lock implementation.
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
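
# Illustrative usage sketch (not part of the original module). The platform-
# appropriate class selected above is used through the context-manager API;
# "high_ground.txt.lock" is a hypothetical lock path:
#
#     lock = FileLock("high_ground.txt.lock", timeout=5)
#     with lock:
#         pass  # critical section; Timeout is raised if 5 seconds elapse first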
| 158
|
import pytest
DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"

DATASET_LOADING_SCRIPT_CODE = """
import json
import os

import datasets


REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}


class __DummyDataset1__(datasets.GeneratorBasedBuilder):

    def _info(self):
        features = datasets.Features(
            {
                "tokens": datasets.Sequence(datasets.Value("string")),
                "ner_tags": datasets.Sequence(
                    datasets.features.ClassLabel(
                        names=[
                            "O",
                            "B-PER",
                            "I-PER",
                            "B-ORG",
                            "I-ORG",
                            "B-LOC",
                            "I-LOC",
                        ]
                    )
                ),
                "langs": datasets.Sequence(datasets.Value("string")),
                "spans": datasets.Sequence(datasets.Value("string")),
            }
        )
        return datasets.DatasetInfo(features=features)

    def _split_generators(self, dl_manager):
        dl_path = dl_manager.download(URLS)
        return [
            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
        ]

    def _generate_examples(self, filepath):
        with open(filepath, "r", encoding="utf-8") as f:
            for i, line in enumerate(f):
                yield i, json.loads(line)
"""
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
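
# Illustrative usage sketch (not part of the original fixtures). A test can
# request the fixture above to load the dummy builder from disk; this assumes
# `datasets.load_dataset_builder` accepts the returned script directory:
#
#     def test_dummy_dataset(dataset_loading_script_dir):
#         import datasets
#         builder = datasets.load_dataset_builder(dataset_loading_script_dir)
#         assert builder.info.features is not None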
| 158
| 1
|
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
    from transformers import Speech2TextFeatureExtractor
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as nested Python lists."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2_000,
        feature_size=24,
        num_mel_bins=24,
        padding_value=0.0,
        sampling_rate=16_000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "num_mel_bins": self.num_mel_bins,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Speech2TextFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = Speech2TextFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_cepstral_mean_and_variance_normalization(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
    def test_cepstral_mean_and_variance_normalization_np(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, max_length=max_length, padding=padding, return_tensors="np", return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="max_length",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1])
        self._check_zero_mean_unit_variance(input_features[2])
    def test_cepstral_mean_and_variance_normalization_trunc_longest(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 4, 24))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=16,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertEqual(input_features.shape, (3, 6, 24))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        expected = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ])
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 584, 24))
        self.assertTrue(np.allclose(input_features[0, 0, :30], expected, atol=1e-4))
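
# Illustrative usage sketch (not part of the original test file), assuming a
# pretrained checkpoint such as "facebook/s2t-small-librispeech-asr" and a 1-D
# float waveform `waveform` sampled at 16 kHz:
#
#     extractor = Speech2TextFeatureExtractor.from_pretrained("facebook/s2t-small-librispeech-asr")
#     features = extractor(waveform, sampling_rate=16_000, return_tensors="pt")
#     print(features.input_features.shape)  # (1, num_frames, num_mel_bins)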
| 139
|
'''simple docstring'''
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    # Difference between the hypothesis and the actual output for one example.
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    # h(x) = theta_0 + theta_1 * x_1 + ... + theta_n * x_n
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    # index == -1 corresponds to the bias term (its "feature" is implicitly 1).
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))


if __name__ == "__main__":
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
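
# For reference, the hypothesis being fitted above is
#     h(x) = parameter_vector[0] + sum_i parameter_vector[i + 1] * x_i
# and each batch gradient-descent step updates every parameter with
#     theta_i := theta_i - LEARNING_RATE * (1 / m) * sum_j error(j) * x_j_i
# where x_j_i == 1 for the bias term (index == -1 in the code above).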
| 139
| 1
|
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops_cpu(self):
        debug_launcher(test_ops.main)
| 365
|
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
logger = logging.getLogger(__name__)


class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])

        hidden_states = layer_outputs[0]

        return hidden_states
@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)

        self.encoder = BertEncoderWithPabee(config)
        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0
        self.regression_threshold = 0

    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output

        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)

                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break

            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res
@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
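
# Illustrative usage sketch (not part of the original module). A PABEE model
# exits early once `patience` consecutive internal classifiers agree; this is
# a minimal sketch assuming a fine-tuned checkpoint path of your own:
#
#     model = BertForSequenceClassificationWithPabee.from_pretrained("path/to/finetuned-checkpoint")
#     model.bert.set_patience(3)
#     model.bert.reset_stats()
#     logits = model(input_ids=input_ids, attention_mask=attention_mask)[0]
#     model.bert.log_stats()  # reports average layers used, i.e. the speed-up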
| 323
| 0
|
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as Bit does not use input_ids, inputs_embeds,
    attention_mask and seq_length.
    """

    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
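
# Illustrative usage sketch (not part of the original test file), assuming the
# public "google/bit-50" checkpoint and a PIL image `image`:
#
#     processor = BitImageProcessor.from_pretrained("google/bit-50")
#     model = BitBackbone.from_pretrained("google/bit-50", out_features=["stage2", "stage3", "stage4"])
#     inputs = processor(images=image, return_tensors="pt")
#     feature_maps = model(**inputs).feature_maps  # one (B, C, H, W) map per requested stage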
| 326
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"


class CamembertTokenizer(PreTrainedTokenizer):
    """Camembert tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        sp_model_kwargs=None,
        **kwargs,
    ) -> None:
        # The mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>)
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self) -> int:
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
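
# Illustrative usage sketch (not part of the original module), assuming the
# public "camembert-base" checkpoint referenced in the maps above:
#
#     tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
#     ids = tokenizer("J'aime le camembert !")["input_ids"]
#     print(tokenizer.convert_ids_to_tokens(ids))  # <s> ... </s>, with fairseq-offset ids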
| 326
| 1
|
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    # failure[k] is the length of the longest proper prefix of the pattern
    # that is also a suffix of pattern[: k + 1]
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
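
# For intuition: the failure array of "aabaabaaa" is [0, 1, 0, 1, 2, 3, 4, 5, 2]
# (Test 5 above). Entry k is how far the search can safely fall back after a
# mismatch at position k + 1 instead of restarting from scratch, which is what
# makes the overall scan O(n + m).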
| 113
|
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
open = open  # noqa: we just need to have a builtin inside this module to test it properly
| 113
| 1
|
"""simple docstring"""
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12

        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config
def rename_key(name):
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn.in_proj" in key:
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                if "message_attn" in key:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.bias"] = val[-dim:]
                else:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            elif key.startswith("mit"):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                if "weight" in key:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                if "weight" in key:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_key_name = rename_key(key)
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val

    return orig_state_dict
def prepare_video(num_frames):
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename=filename, repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """
    Copy/paste/tweak model's weights to our X-CLIP structure.
    """
    model_to_url = {
# fully supervised kinetics-400 checkpoints
'''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''',
'''xclip-base-patch32-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'''
),
'''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''',
'''xclip-base-patch16-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'''
),
'''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''',
'''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''',
# fully supervised kinetics-600 checkpoints
'''xclip-base-patch16-kinetics-600''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'''
),
'''xclip-base-patch16-kinetics-600-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'''
),
'''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''',
# few shot
'''xclip-base-patch16-hmdb-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'''
),
'''xclip-base-patch16-hmdb-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'''
),
'''xclip-base-patch16-hmdb-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'''
),
'''xclip-base-patch16-hmdb-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'''
),
'''xclip-base-patch16-ucf-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'''
),
'''xclip-base-patch16-ucf-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'''
),
'''xclip-base-patch16-ucf-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'''
),
'''xclip-base-patch16-ucf-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'''
),
# zero shot
'''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''',
}
    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
    else:
        raise ValueError(f"Model name {model_name} not supported")
    assert torch.allclose(probs, expected_probs, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""xclip-base-patch32""",
type=str,
help="""Name of the model.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
a_ = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
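
# Example invocation of this conversion script (output path is illustrative):
#   python convert_x_clip_original_pytorch_to_hf.py \
#       --model_name xclip-base-patch32 \
#       --pytorch_dump_folder_path ./converted/xclip-base-patch32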
| 179
|
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    """Pipeline for audio diffusion."""

    _optional_components = ["vqvae"]

    def __init__(self, vqvae, unet, mel, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self):
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000

    @torch.no_grad()
    def __call__(
        self,
        batch_size=1,
        audio_file=None,
        raw_audio=None,
        slice=0,
        start_step=0,
        steps=None,
        generator=None,
        mask_start_secs=0,
        mask_end_secs=0,
        step_generator=None,
        eta=0,
        noise=None,
        encoding=None,
        return_dict=True,
    ):
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width)
            )
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator
                )[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(
                    input_images, noise, self.scheduler.timesteps[start_step - 1]
                )

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(
                input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:])
            )

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNet2DConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, eta=eta, generator=step_generator
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, generator=step_generator
                )["prev_sample"]

            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
        )

        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))

    @torch.no_grad()
    def encode(self, images, steps=50):
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output

        return sample

    @staticmethod
    def slerp(x0, x1, alpha):
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
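
# A minimal usage sketch (the checkpoint id below is an assumption, not pinned
# by this file):
#
#   import torch
#   pipe = AudioDiffusionPipeline.from_pretrained("teticio/audio-diffusion-256")
#   out = pipe(batch_size=1, generator=torch.Generator().manual_seed(42))
#   image, audio = out.images[0], out.audios[0]  # spectrogram image + waveform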
| 179
| 1
|
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
UpperCamelCase__ = logging.get_logger(__name__)
TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}


def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
)
parser.add_argument(
"--tokenizer_name",
default=None,
type=str,
help=(
f"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
"download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--checkpoint_name",
default=None,
type=str,
help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
)
parser.add_argument(
"--force_download",
action="store_true",
help="Re-download checkpoints.",
)
UpperCamelCase__ = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
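
# Example invocation (paths and checkpoint name are illustrative):
#   python convert_slow_tokenizers_checkpoints_to_fast.py \
#       --tokenizer_name BertTokenizer --checkpoint_name bert-base-uncased --dump_path ./fast_tokenizers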
| 351
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_swiftformer": [
"SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SwiftFormerConfig",
"SwiftFormerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
"SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwiftFormerForImageClassification",
"SwiftFormerModel",
"SwiftFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 87
| 0
|
'''simple docstring'''
def gcd(a: int, b: int) -> int:
    '''simple docstring'''
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    '''simple docstring'''
    if gcd(a, m) != 1:
        msg = f'''mod inverse of {a!r} and {m!r} does not exist'''
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
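
# A minimal usage check added for illustration (sample values are assumptions):
# 3 * 4 = 12 and 12 % 11 == 1, so 4 is the modular inverse of 3 modulo 11.
if __name__ == "__main__":
    assert gcd(3, 11) == 1
    assert find_mod_inverse(3, 11) == 4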
| 158
|
'''simple docstring'''
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = GPTJConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            use_cache=False,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            rotary_dim=self.rotary_dim,
        )

        return (config, input_ids, input_mask)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")

        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            attention_mask=attention_mask,
            past_key_values=outputs_cache.past_key_values,
            position_ids=position_ids,
        )

        outputs = model(input_ids)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],
            axis=-1,
        )

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )

        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask_cache,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            past_key_values=outputs_cache.past_key_values,
            attention_mask=attention_mask_cache,
            position_ids=position_ids,
        )

        outputs = model(input_ids, attention_mask=attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)
    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )
@tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(
            ["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True
        )

        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id

        jit_generate = jax.jit(model.generate)

        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
        ).sequences
        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)

        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]

        self.assertListEqual(output_string, expected_string)
@is_pt_flax_cross_test
def _snake_case ( self ) -> Optional[Any]:
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
_lowerCAmelCase = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
_lowerCAmelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
_lowerCAmelCase = getattr(_lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase , _lowerCAmelCase = pt_inputs["input_ids"].shape
_lowerCAmelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_lowerCAmelCase ):
_lowerCAmelCase = 0
_lowerCAmelCase = 1
_lowerCAmelCase = 0
_lowerCAmelCase = 1
_lowerCAmelCase = pt_model_class(_lowerCAmelCase ).eval()
_lowerCAmelCase = model_class(_lowerCAmelCase , dtype=jnp.floataa )
_lowerCAmelCase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _lowerCAmelCase )
_lowerCAmelCase = fx_state
with torch.no_grad():
_lowerCAmelCase = pt_model(**_lowerCAmelCase ).to_tuple()
_lowerCAmelCase = fx_model(**_lowerCAmelCase ).to_tuple()
self.assertEqual(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(_lowerCAmelCase )
_lowerCAmelCase = model_class.from_pretrained(_lowerCAmelCase , from_pt=_lowerCAmelCase )
_lowerCAmelCase = fx_model_loaded(**_lowerCAmelCase ).to_tuple()
self.assertEqual(
len(_lowerCAmelCase ) , len(_lowerCAmelCase ) , "Output lengths differ between Flax and PyTorch" )
for fx_output_loaded, pt_output in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
def _snake_case ( self ) -> Dict:
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
_lowerCAmelCase = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
_lowerCAmelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
_lowerCAmelCase = getattr(_lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase = pt_model_class(_lowerCAmelCase ).eval()
_lowerCAmelCase = model_class(_lowerCAmelCase , dtype=jnp.floataa )
_lowerCAmelCase = load_flax_weights_in_pytorch_model(_lowerCAmelCase , fx_model.params )
_lowerCAmelCase , _lowerCAmelCase = pt_inputs["input_ids"].shape
_lowerCAmelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_lowerCAmelCase ):
_lowerCAmelCase = 0
_lowerCAmelCase = 1
_lowerCAmelCase = 0
_lowerCAmelCase = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
_lowerCAmelCase = pt_model(**_lowerCAmelCase ).to_tuple()
_lowerCAmelCase = fx_model(**_lowerCAmelCase ).to_tuple()
self.assertEqual(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(_lowerCAmelCase )
_lowerCAmelCase = pt_model_class.from_pretrained(_lowerCAmelCase , from_flax=_lowerCAmelCase )
with torch.no_grad():
_lowerCAmelCase = pt_model_loaded(**_lowerCAmelCase ).to_tuple()
self.assertEqual(
len(_lowerCAmelCase ) , len(_lowerCAmelCase ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 158
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_blip_2""": [
"""BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Blip2Config""",
"""Blip2QFormerConfig""",
"""Blip2VisionConfig""",
],
"""processing_blip_2""": ["""Blip2Processor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip_2"] = [
"""BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Blip2Model""",
"""Blip2QFormerModel""",
"""Blip2PreTrainedModel""",
"""Blip2ForConditionalGeneration""",
"""Blip2VisionModel""",
]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 371
|
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
"""E""": 12.70,
"""T""": 9.06,
"""A""": 8.17,
"""O""": 7.51,
"""I""": 6.97,
"""N""": 6.75,
"""S""": 6.33,
"""H""": 6.09,
"""R""": 5.99,
"""D""": 4.25,
"""L""": 4.03,
"""C""": 2.78,
"""U""": 2.76,
"""M""": 2.41,
"""W""": 2.36,
"""F""": 2.23,
"""G""": 2.02,
"""Y""": 1.97,
"""P""": 1.93,
"""B""": 1.29,
"""V""": 0.98,
"""K""": 0.77,
"""J""": 0.15,
"""X""": 0.15,
"""Q""": 0.10,
"""Z""": 0.07,
}
ETAOIN = """ETAOINSHRDLCUMWFGYPBVKJXQZ"""
LETTERS = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def get_letter_count(message: str) -> dict[str, int]:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    return x[0]


def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {freq: [] for letter, freq in letter_to_freq.items()}
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)

    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])

    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)

    freq_order: list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)


def english_freq_match_score(message: str) -> int:
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
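
    # A quick illustrative demo (the sample sentence is an assumption, not from
    # the original module):
    example = "Defend the east wall of the castle."
    print(get_frequency_order(example))
    print(english_freq_match_score(example))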
| 291
| 0
|
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Update the version in one file, using the replace pattern registered for `pattern`."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Reads the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
SCREAMING_SNAKE_CASE :str = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
SCREAMING_SNAKE_CASE :Dict = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
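
# Typical invocations (illustrative):
#   python utils/release.py                  # pre-release: bump the minor version everywhere
#   python utils/release.py --patch          # pre-release for a patch release
#   python utils/release.py --post_release   # switch back to a .dev0 version afterwards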
| 15
|
'''simple docstring'''
from manim import *
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
    def construct(self):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = Rectangle(height=0.5 , width=0.5 )
SCREAMING_SNAKE_CASE : Union[str, Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
SCREAMING_SNAKE_CASE : List[str] = Rectangle(height=0.25 , width=0.25 )
SCREAMING_SNAKE_CASE : Optional[int] = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : List[Any] = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : Any = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : str = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Tuple = VGroup(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : List[Any] = Text("""CPU""" , font_size=24 )
SCREAMING_SNAKE_CASE : Any = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = [mem.copy() for i in range(4 )]
SCREAMING_SNAKE_CASE : Any = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Optional[Any] = Text("""GPU""" , font_size=24 )
SCREAMING_SNAKE_CASE : Dict = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
gpu.move_to([-1, -1, 0] )
self.add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : List[Any] = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = Text("""Model""" , font_size=24 )
SCREAMING_SNAKE_CASE : List[str] = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
model.move_to([3, -1.0, 0] )
self.add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = []
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for i, rect in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : str = fill.copy().set_fill(lowerCamelCase_ , opacity=0.8 )
target.move_to(lowerCamelCase_ )
model_arr.append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase_ , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(lowerCamelCase_ )
self.add(*lowerCamelCase_ , *lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = [meta_mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : Tuple = [meta_mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : Tuple = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Optional[int] = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Dict = VGroup(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : List[Any] = Text("""Disk""" , font_size=24 )
SCREAMING_SNAKE_CASE : Dict = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
disk.move_to([-4, -1.25, 0] )
self.add(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
SCREAMING_SNAKE_CASE : Optional[Any] = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = MarkupText(
f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(lowerCamelCase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = MarkupText(
f'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Optional[Any] = Square(0.3 )
input.set_fill(lowerCamelCase_ , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , lowerCamelCase_ , buff=0.5 )
self.play(Write(lowerCamelCase_ ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=lowerCamelCase_ , buff=0.02 )
self.play(MoveToTarget(lowerCamelCase_ ) )
self.play(FadeOut(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : int = Arrow(start=lowerCamelCase_ , end=lowerCamelCase_ , color=lowerCamelCase_ , buff=0.5 )
a.next_to(model_arr[0].get_left() , lowerCamelCase_ , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
SCREAMING_SNAKE_CASE : Optional[int] = MarkupText(
f'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase_ , run_time=3 ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = {"""run_time""": 1, """fade_in""": True, """fade_out""": True, """buff""": 0.02}
self.play(
Write(lowerCamelCase_ ) , Circumscribe(model_arr[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(model_cpu_arr[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
SCREAMING_SNAKE_CASE : Optional[int] = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , lowerCamelCase_ , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
SCREAMING_SNAKE_CASE : Any = AnimationGroup(
FadeOut(lowerCamelCase_ , run_time=0.5 ) , MoveToTarget(lowerCamelCase_ , run_time=0.5 ) , FadeIn(lowerCamelCase_ , run_time=0.5 ) , lag_ratio=0.2 )
self.play(lowerCamelCase_ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
SCREAMING_SNAKE_CASE : Optional[Any] = 0.7
self.play(
Circumscribe(model_arr[i] , **lowerCamelCase_ ) , Circumscribe(cpu_left_col_base[i] , **lowerCamelCase_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(model_arr[i + 1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(cpu_left_col_base[-1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = a_c
SCREAMING_SNAKE_CASE : Optional[Any] = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(lowerCamelCase_ ) , FadeOut(lowerCamelCase_ , run_time=0.5 ) , )
SCREAMING_SNAKE_CASE : int = MarkupText(f'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase_ , run_time=3 ) , MoveToTarget(lowerCamelCase_ ) )
self.wait()
| 323
| 0
|
def match_pattern(input_string: str, pattern: str) -> bool:
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
    input_string = "aab"
    pattern = "c*a*b"
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F'{input_string} matches the given pattern {pattern}')
else:
print(F'{input_string} does not match with the given pattern {pattern}')
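
    # A few extra hedged checks added for illustration (inputs are assumptions):
    assert match_pattern("aa", "a*")  # '*' lets the preceding 'a' repeat
    assert match_pattern("ab", ".*")  # '.*' matches any string
    assert not match_pattern("aa", "a")  # pattern covers only one character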
| 352
|
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = """2.13.1"""
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
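
# Usage sketch: the imports above re-export the public API at the package root,
# so typical code only touches `datasets` directly (example values are illustrative):
#
#   import datasets
#   ds = datasets.Dataset.from_dict({"text": ["foo", "bar"]})
#   print(ds.num_rows)  # 2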
| 139
| 0
|
"""simple docstring"""
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version('''torch'''))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f'`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}')
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str):
    return compare_versions(torch_version, operation, version)
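
# A minimal usage sketch (version strings below are illustrative):
if __name__ == "__main__":
    print(is_torch_version(">=", "1.10.0"))  # compare the installed torch to a floor version
    print(compare_versions("numpy", ">=", "1.17.0"))  # same check for any installed package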
| 113
|
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({'content': datasets.Value('string' )} ) , supervised_keys=None , )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'examples': get_test_dummy_examples()} )]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with nested features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({'a': datasets.Sequence({'b': datasets.Value('string' )} )} ) , supervised_keys=None , )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'examples': get_test_nested_examples()} )
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(['foo', 'bar', 'foobar'] )]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['foo', 'bar', 'foobar'] )]
class lowerCAmelCase ( lowerCamelCase_ ):
'''simple docstring'''
@require_beam
def __A ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
SCREAMING_SNAKE_CASE = DummyBeamDataset(cache_dir=lowerCAmelCase__ , beam_runner='DirectRunner' )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(lowerCAmelCase__ , builder.name , 'default' , '0.0.0' , F'{builder.name}-train.arrow' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({'content': datasets.Value('string' )} ) )
SCREAMING_SNAKE_CASE = builder.as_dataset()
self.assertEqual(dset['train'].num_rows , lowerCAmelCase__ )
self.assertEqual(dset['train'].info.splits['train'].num_examples , lowerCAmelCase__ )
self.assertDictEqual(dset['train'][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset['train'][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(lowerCAmelCase__ , builder.name , 'default' , '0.0.0' , 'dataset_info.json' ) ) )
del dset
@require_beam
def __A ( self ) -> int:
import apache_beam as beam
SCREAMING_SNAKE_CASE = beam.io.parquetio.WriteToParquet
SCREAMING_SNAKE_CASE = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
SCREAMING_SNAKE_CASE = DummyBeamDataset(cache_dir=lowerCAmelCase__ , beam_runner='DirectRunner' )
with patch('apache_beam.io.parquetio.WriteToParquet' ) as write_parquet_mock:
SCREAMING_SNAKE_CASE = partial(lowerCAmelCase__ , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
lowerCAmelCase__ , builder.name , 'default' , '0.0.0' , F'{builder.name}-train-00000-of-00002.arrow' ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
lowerCAmelCase__ , builder.name , 'default' , '0.0.0' , F'{builder.name}-train-00000-of-00002.arrow' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({'content': datasets.Value('string' )} ) )
SCREAMING_SNAKE_CASE = builder.as_dataset()
self.assertEqual(dset['train'].num_rows , lowerCAmelCase__ )
self.assertEqual(dset['train'].info.splits['train'].num_examples , lowerCAmelCase__ )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset['train']['content'] ) , sorted(['foo', 'bar', 'foobar'] ) )
self.assertTrue(
os.path.exists(os.path.join(lowerCAmelCase__ , builder.name , 'default' , '0.0.0' , 'dataset_info.json' ) ) )
del dset
@require_beam
def __A ( self ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmp_cache_dir:
SCREAMING_SNAKE_CASE = DummyBeamDataset(cache_dir=lowerCAmelCase__ )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def __A ( self ) -> int:
SCREAMING_SNAKE_CASE = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
SCREAMING_SNAKE_CASE = NestedBeamDataset(cache_dir=lowerCAmelCase__ , beam_runner='DirectRunner' )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(lowerCAmelCase__ , builder.name , 'default' , '0.0.0' , F'{builder.name}-train.arrow' ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({'a': datasets.Sequence({'b': datasets.Value('string' )} )} ) )
SCREAMING_SNAKE_CASE = builder.as_dataset()
self.assertEqual(dset['train'].num_rows , lowerCAmelCase__ )
self.assertEqual(dset['train'].info.splits['train'].num_examples , lowerCAmelCase__ )
self.assertDictEqual(dset['train'][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset['train'][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(lowerCAmelCase__ , builder.name , 'default' , '0.0.0' , 'dataset_info.json' ) ) )
del dset
"""simple docstring"""
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return


def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
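# Example invocation (the prompt and paths are illustrative, not from the source):
#   python retrieve.py --class_prompt "photo of a cat" \
#       --class_data_dir ./real_reg/samples_cat --num_class_images 200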
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4_000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]

        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
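# Quick usage sketch outside the test harness (the (1, 80, 3000) output shape
# is the one asserted in test_integration above; the silent input audio is
# illustrative):
#   fe = WhisperFeatureExtractor()
#   feats = fe([np.zeros(480_000)], return_tensors="np").input_features
#   feats.shape  # (1, 80, 3000): 80 mel bins over 30 seconds of padded audio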
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    """Greedy fractional knapsack: sort items by value density, then fill greedily."""
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
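if __name__ == "__main__":
    # Usage sketch (illustrative numbers, not from the source): with capacity
    # 38 the two densest items fit whole and the third is taken fractionally:
    # 60 + 100 + (38 - 30) * 120 / 30 = 192.0
    print(frac_knapsack([60, 100, 120], [10, 20, 30], 38, 3))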
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list, dtype=torch.int32, device=protein["aatype"].device
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list, dtype=torch.int32, device=protein["aatype"].device
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list, dtype=torch.float32, device=protein["aatype"].device
    )
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask

    return protein


def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
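# Sketch of how the indices produced above are typically consumed (tensor
# names are illustrative, not from the source): gather 37-atom coordinates
# into the dense 14-atom layout, zeroing slots the residue type doesn't have.
#   idx = protein["residx_atom14_to_atom37"]  # (num_res, 14)
#   atom14_xyz = torch.gather(atom37_xyz, 1, idx[..., None].expand(-1, -1, 3))
#   atom14_xyz = atom14_xyz * protein["atom14_atom_exists"][..., None]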
"""simple docstring"""
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb):
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray):
    return (gray > 127) & (gray <= 255)


def dilation(image, kernel):
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )

    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image

    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output
if __name__ == "__main__":
# read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
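# Minimal sketch on a synthetic image (illustrative, not from the source): a
# single foreground pixel should grow into a plus shape under the cross-shaped
# structuring element used above.
#   tiny = np.zeros((5, 5))
#   tiny[2, 2] = 1
#   dilation(tiny, np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]))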
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_roberta_prelayernorm": [
        "ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "RobertaPreLayerNormConfig",
        "RobertaPreLayerNormOnnxConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
        "ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RobertaPreLayerNormForCausalLM",
        "RobertaPreLayerNormForMaskedLM",
        "RobertaPreLayerNormForMultipleChoice",
        "RobertaPreLayerNormForQuestionAnswering",
        "RobertaPreLayerNormForSequenceClassification",
        "RobertaPreLayerNormForTokenClassification",
        "RobertaPreLayerNormModel",
        "RobertaPreLayerNormPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
        "TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRobertaPreLayerNormForCausalLM",
        "TFRobertaPreLayerNormForMaskedLM",
        "TFRobertaPreLayerNormForMultipleChoice",
        "TFRobertaPreLayerNormForQuestionAnswering",
        "TFRobertaPreLayerNormForSequenceClassification",
        "TFRobertaPreLayerNormForTokenClassification",
        "TFRobertaPreLayerNormMainLayer",
        "TFRobertaPreLayerNormModel",
        "TFRobertaPreLayerNormPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
        "FlaxRobertaPreLayerNormForCausalLM",
        "FlaxRobertaPreLayerNormForMaskedLM",
        "FlaxRobertaPreLayerNormForMultipleChoice",
        "FlaxRobertaPreLayerNormForQuestionAnswering",
        "FlaxRobertaPreLayerNormForSequenceClassification",
        "FlaxRobertaPreLayerNormForTokenClassification",
        "FlaxRobertaPreLayerNormModel",
        "FlaxRobertaPreLayerNormPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
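# Behavior sketch of the lazy pattern above (not part of the module): nothing
# heavy is imported at package-import time; the first attribute access, e.g.
#   from transformers.models.roberta_prelayernorm import RobertaPreLayerNormModel
# triggers the real import of the corresponding modeling file via _LazyModule.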
"""simple docstring"""
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
    np.dtype("|b1"),
    np.dtype("|u1"),
    np.dtype("<u2"),
    np.dtype(">u2"),
    np.dtype("<i2"),
    np.dtype(">i2"),
    np.dtype("<u4"),
    np.dtype(">u4"),
    np.dtype("<i4"),
    np.dtype(">i4"),
    np.dtype("<f4"),
    np.dtype(">f4"),
    np.dtype("<f8"),
    np.dtype(">f8"),
]
@dataclass
class Image:
    """Image feature to read image data from an image file."""

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        """Encode example into a format for Arrow."""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")

        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        """Decode example image file into image data."""
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")

        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """If in the decodable state, return the feature itself, otherwise flatten the feature into a dictionary."""
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        """Cast an Arrow array to the Image arrow storage type."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """Embed image files into the Arrow array."""

        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS


def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Convert a PIL Image object to bytes using native compression if possible, else PNG/TIFF compression."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()


def encode_pil_image(image: "PIL.Image.Image") -> dict:
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}


def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize

    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"
            )

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}


def objects_to_list_of_image_dicts(
    objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]]
) -> List[dict]:
    """Encode a list of objects into a format suitable for creating an extension array of type `ImageExtensionType`."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
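# Usage sketch (the file path and column name are illustrative): declaring a
# column as Image() makes datasets store {"bytes", "path"} structs in Arrow
# and decode them lazily into PIL images on access.
#   from datasets import Dataset, Features, Image
#   ds = Dataset.from_dict({"img": ["path/to/a.png"]}, features=Features({"img": Image()}))
#   ds[0]["img"]  # -> PIL.Image.Image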
"""simple docstring"""
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMSNConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMSN does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
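# Inference sketch outside the test harness (the checkpoint id matches the one
# used above; the input image is whatever PIL image you load):
#   processor = ViTImageProcessor.from_pretrained("facebook/vit-msn-small")
#   model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small")
#   logits = model(**processor(images=image, return_tensors="pt")).logits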
"""simple docstring"""
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
    from transformers import Pix2StructImageProcessor


class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()

        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048

        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )

    def test_call_vqa(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )

    def test_call_numpy(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )

    def test_call_pytorch(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input: the 4th (alpha) channel is dropped by do_convert_rgb
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
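# Why expected_hidden_dim is (patch_h * patch_w * channels) + 2 in the tests
# above: Pix2Struct prepends each flattened patch with its row and column
# index, so every patch vector carries two extra scalars besides its pixels.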
"""simple docstring"""
def bubble_sort(list_data: list, length: int = 0) -> list:
    """Recursive bubble sort: each pass floats the largest remaining element to the end."""
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
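if __name__ == "__main__":
    # Usage sketch (illustrative inputs): each recursive pass shrinks the
    # unsorted prefix by one element.
    print(bubble_sort([0, 5, 2, 3, 2]))  # [0, 2, 2, 3, 5]
    print(bubble_sort([-2, -45, -5]))  # [-45, -5, -2]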
def is_pentagonal(n: int) -> bool:
    """A number is pentagonal if (1 + sqrt(1 + 24 n)) / 6 is an integer."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    """Find the smallest difference b = P_j - P_i such that P_i + P_j and P_j - P_i are both pentagonal."""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b

    return -1
if __name__ == "__main__":
print(f'''{solution() = }''')
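# Sanity-check sketch: the first pentagonal numbers are 1, 5, 12, 22, 35, and
# is_pentagonal accepts exactly those among the small integers.
#   [n for n in range(1, 36) if is_pentagonal(n)]  # -> [1, 5, 12, 22, 35]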
'''simple docstring'''
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def sudoku(grid: Matrix) -> Matrix | None:
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0

    return None
def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("\nExample grid:\n" + "=" * 20)
print_solution(example_grid)
print("\nExample grid solution:")
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("Cannot find a solution.")
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class __SCREAMING_SNAKE_CASE ( a__ ):
'''simple docstring'''
_a = ['vqvae']
def __init__( self : Union[str, Any], lowerCamelCase : Union[str, Any], lowerCamelCase : Optional[int], lowerCamelCase : Optional[Any], lowerCamelCase : Dict, )-> str:
super().__init__()
self.register_modules(unet=_lowerCamelCase, scheduler=_lowerCamelCase, mel=_lowerCamelCase, vqvae=_lowerCamelCase )
def snake_case ( self : Optional[int] )-> int:
return 50 if isinstance(self.scheduler, _lowerCamelCase ) else 1000
    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        audio_file: str = None,
        raw_audio: np.ndarray = None,
        slice: int = 0,
        start_step: int = 0,
        steps: int = None,
        generator: torch.Generator = None,
        mask_start_secs: float = 0,
        mask_end_secs: float = 0,
        step_generator: torch.Generator = None,
        eta: float = 0,
        noise: torch.Tensor = None,
        encoding: torch.Tensor = None,
        return_dict=True,
    ) -> Union[
        Union[AudioPipelineOutput, ImagePipelineOutput],
        Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
    ]:
        """Denoise towards a spectrogram image, optionally seeded by an input audio slice."""
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width)
            )
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator
                )[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1])

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNet2DConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    eta=eta,
                    generator=step_generator,
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    generator=step_generator,
                )["prev_sample"]

            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
        )

        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))
    @torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        """Reverse-step spectrogram images back towards noise (DDIM only)."""
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output

        return sample

    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical linear interpolation between two tensors."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
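
# Hedged usage sketch (not part of the original module): slerp is typically
# used to interpolate between two starting noises so intermediate points stay
# on roughly the same hypersphere, unlike straight linear interpolation.
# The shapes below are illustrative assumptions.
#
#     noise_a = torch.randn(1, 1, 256, 256)
#     noise_b = torch.randn(1, 1, 256, 256)
#     halfway = AudioDiffusionPipeline.slerp(noise_a, noise_b, 0.5)
#     # pass `halfway` as the `noise` argument of the pipeline's __call__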
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167_969, 0.116_699, 0.081_543, 0.154_297, 0.132_812, 0.108_887, 0.169_922, 0.169_922, 0.205_078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271_484, 0.261_719, 0.275_391, 0.277_344, 0.279_297, 0.291_016, 0.294_922, 0.302_734, 0.302_734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
'''simple docstring'''
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = "<<<<<<< This should probably be modified because it mentions: "
HIGHLIGHT_MESSAGE_POST = """=======
>>>>>>>
"""

TO_HIGHLIGHT = [
'''TextEncoderConfig''',
'''ByteTextEncoder''',
'''SubwordTextEncoder''',
'''encoder_config''',
'''maybe_build_from_corpus''',
'''manual_dir''',
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(r'''tfds\.core''', r'''datasets'''),
(r'''tf\.io\.gfile\.GFile''', r'''open'''),
(r'''tf\.([\w\d]+)''', r'''datasets.Value(\'\1\')'''),
(r'''tfds\.features\.Text\(\)''', r'''datasets.Value(\'string\')'''),
(r'''tfds\.features\.Text\(''', r'''datasets.Value(\'string\'),'''),
(r'''features\s*=\s*tfds.features.FeaturesDict\(''', r'''features=datasets.Features('''),
(r'''tfds\.features\.FeaturesDict\(''', r'''dict('''),
(r'''The TensorFlow Datasets Authors''', r'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
(r'''tfds\.''', r'''datasets.'''),
(r'''dl_manager\.manual_dir''', r'''self.config.data_dir'''),
(r'''self\.builder_config''', r'''self.config'''),
]
def convert_command_factory(args: Namespace):
    """Factory used by the CLI entry point to build a ConvertCommand from parsed args."""
    return ConvertCommand(args.tfds_path, args.datasets_directory)
class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register this command to the argparse sub-parsers."""
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")

        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}
        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)

                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
'''simple docstring'''
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Measure qubit 0 of a fresh circuit and return the shot histogram."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
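
# Hedged extension sketch (not part of the original script): with no gates the
# qubit stays in |0>, so all shots report '0'. Applying a Hadamard before the
# measurement yields a roughly 50/50 split instead. The function name is my own.
def superposition_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    circuit.h(0)  # put qubit 0 into an equal superposition
    circuit.measure([0], [0])
    job = qiskit.execute(circuit, simulator, shots=1000)
    return job.result().get_counts(circuit)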
if __name__ == "__main__":
print(F'Total count for various states are: {single_qubit_measure(1, 1)}')
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    """Load a TensorFlow OpenAI GPT checkpoint and save it as a PyTorch model."""
    # Construct the model config
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)

    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--openai_checkpoint_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the TensorFlow checkpoint path.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--openai_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
    args = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
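
# Hedged usage sketch (not part of the original script); the script name and
# paths below are placeholders, not verified values:
#
#     python convert_openai_original_tf_checkpoint_to_pytorch.py \
#         --openai_checkpoint_folder_path /path/to/openai/checkpoint \
#         --pytorch_dump_folder_path /path/to/output
#
# Omitting --openai_config_file makes the script fall back to a default
# OpenAIGPTConfig, per the branch at the top of the conversion function.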
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
SCREAMING_SNAKE_CASE :List[str] = logging.getLogger(__name__)
def parse_args():
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default="wikitext",
        help="Name of the training dataset. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size",
        type=int,
        default=1000,
        help="Number of entries to go in a single shard.",
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit",
        default=None,
        type=int,
        help="Limit the number of shards (used for debugging).",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        default="tf-tpu",
        type=str,
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )
    args = parser.parse_args()
    return args


def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples["text"])

    return fn


def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records


def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)
if __name__ == "__main__":
    args = parse_args()
    main(args)
'''simple docstring'''
def binary_insertion_sort(collection: list) -> list:
    """Sort a list in place using insertion sort with a binary search for the insertion point."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        # binary search for where ``val`` belongs among the already-sorted prefix
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # shift elements right to make room at index ``low``
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
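
# Hedged sanity-check sketch (not part of the original script): the result
# should always agree with Python's built-in sort, duplicates included.
def _self_test() -> None:
    import random

    for _ in range(100):
        data = [random.randint(-50, 50) for _ in range(random.randint(0, 30))]
        assert binary_insertion_sort(list(data)) == sorted(data)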
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
"""simple docstring"""
from __future__ import annotations
import numpy as np
def relu(vector: list[float]) -> np.ndarray:
    """Apply the rectified linear unit element-wise: max(0, x)."""
    return np.maximum(0, vector)
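
# Hedged companion sketch (not part of the original file): the leaky variant,
# which keeps a small slope for negative inputs instead of zeroing them. The
# function name and the 0.01 default slope are my own illustrative choices.
def leaky_relu(vector: list[float], slope: float = 0.01) -> np.ndarray:
    array = np.asarray(vector)
    return np.where(array > 0, array, slope * array)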
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self):
        return 1e-3
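
# Hedged usage sketch (not part of the original module); the values below are
# illustrative, not a recommended configuration:
#
#     config = ResNetConfig(depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic")
#     assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]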
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'],
'configuration_maskformer_swin': ['MaskFormerSwinConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"]
    _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_maskformer"] = [
'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'MaskFormerForInstanceSegmentation',
'MaskFormerModel',
'MaskFormerPreTrainedModel',
]
    _import_structure["modeling_maskformer_swin"] = [
'MaskFormerSwinBackbone',
'MaskFormerSwinModel',
'MaskFormerSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
    "google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
    "google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
    # See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class BigBirdConfig(PretrainedConfig):
    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout


class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
def heaps(arr: list) -> list:
    """Return all permutations of ``arr`` as tuples, using Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res
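
# Hedged sanity-check sketch (not part of the original script): Heap's
# algorithm must emit exactly n! distinct permutations.
def _self_test() -> None:
    from math import factorial

    for n in range(1, 6):
        perms = heaps(list(range(n)))
        assert len(perms) == factorial(n)
        assert len(set(perms)) == factorial(n)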
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-small-librispeech-asr": (
        "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class Speech2TextConfig(PretrainedConfig):
    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        encoder_layers=12,
        encoder_ffn_dim=2048,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_source_positions=6000,
        max_target_positions=1024,
        num_conv_layers=2,
        conv_kernel_sizes=(5, 5),
        conv_channels=1024,
        input_feat_per_channel=80,
        input_channels=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
'''simple docstring'''
def optimal_merge_pattern(files: list) -> float:
    """Return the minimum total cost of merging all files into a single file."""
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost
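
# Hedged companion sketch (not part of the original file): the same greedy
# strategy with a min-heap, which replaces the O(n) list scans per merge with
# O(log n) heap operations. The function name is my own.
def optimal_merge_pattern_heap(files: list) -> float:
    import heapq

    heap = list(files)
    heapq.heapify(heap)
    optimal_merge_cost = 0
    while len(heap) > 1:
        merged = heapq.heappop(heap) + heapq.heappop(heap)
        optimal_merge_cost += merged
        heapq.heappush(heap, merged)
    return optimal_merge_cost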
if __name__ == "__main__":
import doctest
doctest.testmod()
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nezha"] = [
'NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST',
'NezhaForNextSentencePrediction',
'NezhaForMaskedLM',
'NezhaForPreTraining',
'NezhaForMultipleChoice',
'NezhaForQuestionAnswering',
'NezhaForSequenceClassification',
'NezhaForTokenClassification',
'NezhaModel',
'NezhaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_resnet"] = [
'''RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ResNetForImageClassification''',
'''ResNetModel''',
'''ResNetPreTrainedModel''',
'''ResNetBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_resnet"] = [
'''TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFResNetForImageClassification''',
'''TFResNetModel''',
'''TFResNetPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_resnet"] = [
'''FlaxResNetForImageClassification''',
'''FlaxResNetModel''',
'''FlaxResNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=False,
        vocab_size=19,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        config = EsmConfig(
            vocab_size=33,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            is_folding_model=True,
            esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False},
        )
        return config
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False

    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip("""Does not support attention outputs""" )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip("""ESMFold does not support passing input embeds!""" )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
"""simple docstring"""
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
"""simple docstring"""
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def SCREAMING_SNAKE_CASE ( self : Any ) -> str:
"""simple docstring"""
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
"""simple docstring"""
pass
@unittest.skip("""ESMFold does not output hidden states in the normal way.""" )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip("""ESMfold does not output hidden states in the normal way.""" )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip("""ESMFold only has one output format.""" )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip("""This test doesn't work for ESMFold and doesn't test core functionality""" )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip("""ESMFold does not support input chunking.""" )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
"""simple docstring"""
pass
@unittest.skip("""ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.""" )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip("""ESMFold doesn't support torchscript compilation.""" )
def SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
"""simple docstring"""
pass
@unittest.skip("""ESMFold doesn't support torchscript compilation.""" )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip("""ESMFold doesn't support torchscript compilation.""" )
def SCREAMING_SNAKE_CASE ( self : int ) -> Any:
"""simple docstring"""
pass
@unittest.skip("""ESMFold doesn't support data parallel.""" )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
"""simple docstring"""
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
"""simple docstring"""
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Element-wise logistic sigmoid."""
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """SiLU (swish) activation: x * sigmoid(x)."""
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
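    # A quick numeric check of the two activations defined above
    # (the input values are illustrative):
    x = np.array([-1.0, 0.0, 1.0])
    print(sigmoid(x))               # approx. [0.2689, 0.5, 0.7311]
    print(sigmoid_linear_unit(x))   # approx. [-0.2689, 0.0, 0.7311]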
| 61
|
"""simple docstring"""
def merge_sort(collection: list):
    def merge(left: list, right: list) -> list:
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right
        return list(_merge())
    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(*merge_sort(unsorted), sep=''',''')
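    # A few extra sanity checks for the implementation above:
    assert merge_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]
    assert merge_sort([]) == []
    assert merge_sort([2, 2, 1]) == [1, 2, 2]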
| 60
| 0
|
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __magic_name__ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = [R"h\.\d+\.attn\.bias", R"h\.\d+\.attn\.masked_bias"]
@register_to_config
    def __init__( self , prefix_length , prefix_inner_dim , prefix_hidden_dim = None , vocab_size = 50_257 , n_positions = 1_024 , n_embd = 768 , n_layer = 12 , n_head = 12 , n_inner = None , activation_function = "gelu_new" , resid_pdrop = 0.1 , embd_pdrop = 0.1 , attn_pdrop = 0.1 , layer_norm_epsilon = 1e-5 , initializer_range = 0.02 , scale_attn_weights = True , use_cache = True , scale_attn_by_inverse_layer_idx = False , reorder_and_upcast_attn = False , ):
        """simple docstring"""
        super().__init__()
        self.prefix_length = prefix_length
        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f'`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and'
                f' `n_embd`: {n_embd} are not equal.' )
        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim
        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim , n_embd ) if self.prefix_hidden_dim is not None else nn.Identity()
        )
        gpt_config = GPTaConfig(
            vocab_size=vocab_size , n_positions=n_positions , n_embd=n_embd , n_layer=n_layer , n_head=n_head , n_inner=n_inner , activation_function=activation_function , resid_pdrop=resid_pdrop , embd_pdrop=embd_pdrop , attn_pdrop=attn_pdrop , layer_norm_epsilon=layer_norm_epsilon , initializer_range=initializer_range , scale_attn_weights=scale_attn_weights , use_cache=use_cache , scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx , reorder_and_upcast_attn=reorder_and_upcast_attn , )
        self.transformer = GPTaLMHeadModel(gpt_config )
    def forward( self , input_ids , prefix_embeds , attention_mask = None , labels = None , ):
        """simple docstring"""
        embedding_text = self.transformer.transformer.wte(input_ids )
        hidden = self.encode_prefix(prefix_embeds )
        prefix_embeds = self.decode_prefix(hidden )
        embedding_cat = torch.cat((prefix_embeds, embedding_text) , dim=1 )
        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
            labels = torch.cat((dummy_token, input_ids) , dim=1 )
        out = self.transformer(inputs_embeds=embedding_cat , labels=labels , attention_mask=attention_mask )
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out
    def get_dummy_token( self , batch_size , device ):
        """simple docstring"""
        return torch.zeros(batch_size , self.prefix_length , dtype=torch.int64 , device=device )
    def encode( self , prefix ):
        """simple docstring"""
        return self.encode_prefix(prefix )
@torch.no_grad()
    def generate_captions( self , features , eos_token_id , device ):
        """simple docstring"""
        features = torch.split(features , 1 , dim=0 )
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device ) )  # back to the clip feature
            # Only support beam search for now
            output_tokens , seq_lengths = self.generate_beam(
                input_embeds=feature , device=device , eos_token_id=eos_token_id )
            generated_tokens.append(output_tokens[0] )
            generated_seq_lengths.append(seq_lengths[0] )
        generated_tokens = torch.stack(generated_tokens )
        generated_seq_lengths = torch.stack(generated_seq_lengths )
        return generated_tokens, generated_seq_lengths
@torch.no_grad()
    def generate_beam( self , input_embeds=None , device=None , input_ids=None , beam_size = 5 , entry_length = 67 , temperature = 1.0 , eos_token_id = None , ):
        """simple docstring"""
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size , device=device , dtype=torch.int )
        is_stopped = torch.zeros(beam_size , device=device , dtype=torch.bool )
        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids )
        for i in range(entry_length ):
            outputs = self.transformer(inputs_embeds=generated )
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1 ).log()
            if scores is None:
                scores , next_tokens = logits.topk(beam_size , -1 )
                generated = generated.expand(beam_size , *generated.shape[1:] )
                next_tokens , scores = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size , *tokens.shape[1:] )
                    tokens = torch.cat((tokens, next_tokens) , dim=1 )
            else:
                logits[is_stopped] = -float(np.inf )
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average , next_tokens = scores_sum_average.view(-1 ).topk(beam_size , -1 )
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1 )
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens) , dim=1 )
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]
            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
            generated = torch.cat((generated, next_token_embed) , dim=1 )
            is_stopped = is_stopped + next_tokens.eq(stop_token_index ).squeeze()
            if is_stopped.all():
                break
        scores = scores / seq_lengths
        order = scores.argsort(descending=True )
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts , dim=0 )
        seq_lengths = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
        return output_texts, seq_lengths
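# The ranking rule inside `generate_beam` -- candidates are ordered by their
# length-normalised cumulative log-probability -- can be illustrated with a
# small standalone sketch (the toy next-token distribution is purely
# illustrative):
if __name__ == "__main__":

    def toy_beam_search(next_log_probs, beam_size=3, max_len=5, eos_id=0):
        beams = [([], 0.0, False)]  # (tokens, cumulative log-prob, stopped)
        for _ in range(max_len):
            candidates = []
            for toks, score, stopped in beams:
                if stopped:
                    candidates.append((toks, score, True))
                    continue
                for tok, lp in enumerate(next_log_probs(toks)):
                    candidates.append((toks + [tok], score + lp, tok == eos_id))
            # rank by average log-probability, mirroring scores_sum / seq_lengths above
            candidates.sort(key=lambda c: c[1] / max(len(c[0]), 1), reverse=True)
            beams = candidates[:beam_size]
            if all(stopped for _, _, stopped in beams):
                break
        return beams

    rng = np.random.default_rng(0)
    print(toy_beam_search(lambda toks: np.log(rng.dirichlet(np.ones(6)))))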
| 357
|
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config , input_ids , attention_mask=None , head_mask=None ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class __magic_name__ :
'''simple docstring'''
__UpperCamelCase = OPTConfig
__UpperCamelCase = {}
__UpperCamelCase = "gelu"
def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=False , _a=99 , _a=16 , _a=2 , _a=4 , _a=4 , _a="gelu" , _a=0.1 , _a=0.1 , _a=20 , _a=2 , _a=1 , _a=0 , _a=16 , _a=16 , ):
"""simple docstring"""
lowerCamelCase = parent
lowerCamelCase = batch_size
lowerCamelCase = seq_length
lowerCamelCase = is_training
lowerCamelCase = use_labels
lowerCamelCase = vocab_size
lowerCamelCase = hidden_size
lowerCamelCase = num_hidden_layers
lowerCamelCase = num_attention_heads
lowerCamelCase = intermediate_size
lowerCamelCase = hidden_act
lowerCamelCase = hidden_dropout_prob
lowerCamelCase = attention_probs_dropout_prob
lowerCamelCase = max_position_embeddings
lowerCamelCase = eos_token_id
lowerCamelCase = pad_token_id
lowerCamelCase = bos_token_id
lowerCamelCase = embed_dim
lowerCamelCase = word_embed_proj_dim
lowerCamelCase = False
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
lowerCamelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
lowerCamelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
lowerCamelCase = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=_a , **self.config_updates , )
lowerCamelCase = prepare_opt_inputs_dict(_a , _a )
return config, inputs_dict
def _lowerCAmelCase ( self , _a , _a ):
"""simple docstring"""
lowerCamelCase = TFOPTModel(config=_a )
lowerCamelCase = inputs_dict["""input_ids"""]
lowerCamelCase = input_ids[:1, :]
lowerCamelCase = inputs_dict["""attention_mask"""][:1, :]
lowerCamelCase = 1
# first forward pass
lowerCamelCase = model(_a , attention_mask=_a , use_cache=_a )
lowerCamelCase , lowerCamelCase = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
lowerCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCamelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and attention mask
lowerCamelCase = tf.concat([input_ids, next_tokens] , axis=-1 )
lowerCamelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
lowerCamelCase = model(_a , attention_mask=_a )[0]
lowerCamelCase = model(_a , attention_mask=_a , past_key_values=_a )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
lowerCamelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
lowerCamelCase = output_from_no_past[:, -3:, random_slice_idx]
lowerCamelCase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_a , _a , rtol=1e-3 )
@require_tf
class __magic_name__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
__UpperCamelCase = (TFOPTForCausalLM,) if is_tf_available() else ()
__UpperCamelCase = (
{"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
)
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = 10
def _lowerCAmelCase ( self ):
"""simple docstring"""
        self.model_tester = TFOPTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=OPTConfig )
def _lowerCAmelCase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(_a , _a ):
if hasattr(_a , """weight""" ):
return embedding_layer.weight
else:
                # Here we build the word embedding weights if they do not exist.
                # Then we retry getting the attribute once built.
model.build()
if hasattr(_a , """weight""" ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
lowerCamelCase = model_class(config=_a )
lowerCamelCase = _get_word_embedding_weight(_a , model.get_input_embeddings() )
lowerCamelCase = _get_word_embedding_weight(_a , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(_a )
lowerCamelCase = _get_word_embedding_weight(_a , model.get_input_embeddings() )
lowerCamelCase = _get_word_embedding_weight(_a , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
lowerCamelCase = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , _a )
# check that weights remain the same after resizing
lowerCamelCase = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
lowerCamelCase = False
self.assertTrue(_a )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , _a )
lowerCamelCase = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
lowerCamelCase = False
self.assertTrue(_a )
def a__ ( snake_case__ ) -> List[Any]:
return tf.constant(snake_case__ , dtype=tf.intaa )
@require_tf
class __magic_name__ ( unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = 99
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = tf.ones((4, 1) , dtype=tf.intaa ) * 2
lowerCamelCase = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
lowerCamelCase = input_ids.shape[0]
lowerCamelCase = OPTConfig(
vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class __magic_name__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = TFOPTModel.from_pretrained("""facebook/opt-350m""" )
lowerCamelCase = _long_tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
lowerCamelCase = tf.not_equal(_a , model.config.pad_token_id )
with tf.GradientTape():
lowerCamelCase = model(input_ids=_a , attention_mask=_a ).last_hidden_state
lowerCamelCase = (1, 11, 512)
self.assertEqual(output.shape , _a )
lowerCamelCase = tf.constant(
[[-0.2_873, -1.9_218, -0.3_033], [-1.2_710, -0.1_338, -0.1_902], [0.4_095, 0.1_214, -1.3_121]] )
self.assertTrue(np.allclose(output[:, :3, :3] , _a , atol=4e-3 ) )
lowerCamelCase = tf.function(_a , jit_compile=_a )
lowerCamelCase = xla_generate(_a , _a )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , _a , atol=4e-2 ) )
@require_tf
@slow
class __magic_name__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
"""simple docstring"""
super().setUp()
lowerCamelCase = """facebook/opt-350m"""
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = TFOPTForCausalLM.from_pretrained(self.path_model )
lowerCamelCase = GPTaTokenizer.from_pretrained(self.path_model )
lowerCamelCase = [
"""Today is a beautiful day and I want to""",
"""In the city of""",
"""Paris is the capital of France and""",
"""Computers and mobile phones have taken""",
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
lowerCamelCase = tokenizer(_a , return_tensors="""tf""" , padding=_a , add_special_tokens=_a )
lowerCamelCase = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
lowerCamelCase = tf.constant(
[
[1.3_851, -13.8_923, -10.5_229, -10.7_533, -0.2_309, -10.2_384, -0.5_365, -9.0_947, -5.1_670],
[-4.7_073, -10.6_276, -3.9_415, -21.5_242, -0.2_822, -0.2_822, -0.2_822, -0.2_822, -0.2_822],
[0.6_247, -3.4_229, -8.9_179, -1.4_297, -14.1_650, 1.4_146, -9.0_218, -0.2_703, -0.2_703],
[6.4_783, -1.9_913, -10.7_926, -2.3_336, 1.5_092, -0.9_974, -6.8_213, 1.3_477, 1.3_477],
] )
self.assertTrue(np.allclose(_a , _a , atol=1e-4 ) )
lowerCamelCase = tf.function(_a , jit_compile=_a )
lowerCamelCase = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(_a , _a , atol=1e-4 ) )
@require_tf
@slow
class __magic_name__ ( unittest.TestCase ):
'''simple docstring'''
@property
def _lowerCAmelCase ( self ):
"""simple docstring"""
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = """facebook/opt-125m"""
lowerCamelCase = [
"""Today is a beautiful day and I want to""",
"""In the city of New York, the city""",
"""Paris is the capital of France and the capital""",
"""Computers and mobile phones have taken over the""",
]
lowerCamelCase = []
lowerCamelCase = GPTaTokenizer.from_pretrained(_a )
lowerCamelCase = TFOPTForCausalLM.from_pretrained(_a )
for prompt in self.prompts:
lowerCamelCase = tokenizer(_a , return_tensors="""tf""" ).input_ids
lowerCamelCase = model.generate(_a , max_length=10 )
lowerCamelCase = tokenizer.batch_decode(_a , skip_special_tokens=_a )
predicted_outputs += generated_string
self.assertListEqual(_a , _a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = """facebook/opt-350m"""
lowerCamelCase = GPTaTokenizer.from_pretrained(_a )
lowerCamelCase = TFOPTForCausalLM.from_pretrained(_a )
lowerCamelCase = """left"""
# use different length sentences to test batching
lowerCamelCase = [
"""Hello, my dog is a little""",
"""Today, I""",
]
lowerCamelCase = tokenizer(_a , return_tensors="""tf""" , padding=_a )
lowerCamelCase = inputs["""input_ids"""]
lowerCamelCase = model.generate(input_ids=_a , attention_mask=inputs["""attention_mask"""] )
lowerCamelCase = tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids
lowerCamelCase = model.generate(input_ids=_a )
lowerCamelCase = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs["""attention_mask"""][-1] , tf.intaa ) )
lowerCamelCase = tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids
lowerCamelCase = model.generate(input_ids=_a , max_length=model.config.max_length - num_paddings )
lowerCamelCase = tokenizer.batch_decode(_a , skip_special_tokens=_a )
lowerCamelCase = tokenizer.decode(output_non_padded[0] , skip_special_tokens=_a )
lowerCamelCase = tokenizer.decode(output_padded[0] , skip_special_tokens=_a )
lowerCamelCase = [
"""Hello, my dog is a little bit of a dork.\nI'm a little bit""",
"""Today, I was in the middle of a conversation with a friend about the""",
]
self.assertListEqual(_a , _a )
self.assertListEqual(_a , [non_padded_sentence, padded_sentence] )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = """facebook/opt-350m"""
lowerCamelCase = [
"""Today is a beautiful day and I want to""",
"""In the city of San Francisco, the city""",
"""Paris is the capital of France and the capital""",
"""Computers and mobile phones have taken over the""",
]
lowerCamelCase = []
lowerCamelCase = GPTaTokenizer.from_pretrained(_a )
lowerCamelCase = TFOPTForCausalLM.from_pretrained(_a )
for prompt in self.prompts:
lowerCamelCase = tokenizer(_a , return_tensors="""tf""" ).input_ids
lowerCamelCase = model.generate(_a , max_length=10 )
lowerCamelCase = tokenizer.batch_decode(_a , skip_special_tokens=_a )
predicted_outputs += generated_string
self.assertListEqual(_a , _a )
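# A condensed sketch of the batched-generation pattern verified above: with left
# padding, padded and unpadded prompts should decode to the same continuations
# (illustrative usage, not part of the test suite):
if __name__ == "__main__":
    sketch_tokenizer = GPTaTokenizer.from_pretrained("facebook/opt-350m")
    sketch_tokenizer.padding_side = "left"
    sketch_model = TFOPTForCausalLM.from_pretrained("facebook/opt-350m")
    batch = sketch_tokenizer(["Hello, my dog is a little", "Today, I"], return_tensors="tf", padding=True)
    generated = sketch_model.generate(input_ids=batch["input_ids"], attention_mask=batch["attention_mask"])
    print(sketch_tokenizer.batch_decode(generated, skip_special_tokens=True))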
| 168
| 0
|
"""simple docstring"""
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__UpperCamelCase : Any = logging.get_logger(__name__)
__UpperCamelCase : Union[str, Any] = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
__UpperCamelCase : List[Any] = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
__UpperCamelCase : Optional[Any] = {
'''abeja/gpt-neox-japanese-2.7b''': 2_0_4_8,
}
def __SCREAMING_SNAKE_CASE ( A_ , A_ ):
with open(A_ , '''r''' , encoding='''utf-8''' ) as f:
lowerCAmelCase__ : Dict = json.loads(f.read() )
lowerCAmelCase__ : Union[str, Any] = collections.OrderedDict()
lowerCAmelCase__ : Optional[int] = collections.OrderedDict()
lowerCAmelCase__ : Optional[Any] = collections.OrderedDict()
with open(A_ , '''r''' , encoding='''utf-8''' ) as f:
lowerCAmelCase__ : str = f.readlines()
lowerCAmelCase__ : Optional[int] = [[t.rstrip('''\n''' )] if (t == ''',''' or ''',''' not in t) else t.rstrip('''\n''' ).split(''',''' ) for t in token]
for idx, b in enumerate(A_ ):
lowerCAmelCase__ : int = b
lowerCAmelCase__ : Dict = idx
for wd in b:
lowerCAmelCase__ : List[str] = idx
return vocab, raw_vocab, ids_to_tokens, emoji
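# A minimal demo of the line-parsing rule used above: a comma-separated vocab
# line maps several surface forms to one id, while other lines become single
# tokens (the sample lines are illustrative, not the real vocabulary):
if __name__ == "__main__":
    _lines = ["あ,ア\n", "<SP>\n"]
    _token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in _lines]
    print(_token)  # [['あ', 'ア'], ['<SP>']]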
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = ["input_ids", "attention_mask"]
    def __init__( self : List[Any] ,vocab_file : str ,emoji_file : str ,unk_token : str="<|endoftext|>" ,pad_token : str="<|endoftext|>" ,bos_token : str="<|startoftext|>" ,eos_token : str="<|endoftext|>" ,do_clean_text : bool=False ,**kwargs : Optional[Any] ,):
        super().__init__(
            unk_token=unk_token ,pad_token=pad_token ,bos_token=bos_token ,eos_token=eos_token ,do_clean_text=do_clean_text ,**kwargs ,)
        if not os.path.isfile(vocab_file ):
            raise ValueError(
                F'Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained'
                ''' model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`''' )
        if not os.path.isfile(emoji_file ):
            raise ValueError(
                F'Can\'t find an emoji file at path \'{emoji_file}\'. To load the emoji information from a Google'
                ''' pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`''' )
        self.do_clean_text = do_clean_text
        self.vocab ,self.raw_vocab ,self.ids_to_tokens ,self.emoji = load_vocab_and_emoji(vocab_file ,emoji_file )
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab ,ids_to_tokens=self.ids_to_tokens ,emoji=self.emoji )
@property
def __lowerCAmelCase ( self : int ):
# self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
return len(self.raw_vocab )
def __lowerCAmelCase ( self : Union[str, Any] ):
return dict(self.raw_vocab ,**self.added_tokens_encoder )
def __lowerCAmelCase ( self : Any ,lowercase_ : List[Any] ):
return self.subword_tokenizer.tokenize(lowercase_ ,clean=self.do_clean_text )
def __lowerCAmelCase ( self : List[str] ,lowercase_ : List[str] ):
return self.vocab.get(lowercase_ ,self.vocab.get(self.unk_token ) )
def __lowerCAmelCase ( self : int ,lowercase_ : str ):
return self.subword_tokenizer.convert_id_to_token(lowercase_ )
def __lowerCAmelCase ( self : int ,lowercase_ : List[Any] ):
        out_string = ''''''.join(lowercase_ ).strip()
return out_string
def __lowerCAmelCase ( self : Union[str, Any] ,lowercase_ : "Conversation" ):
lowerCAmelCase__ : int = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowercase_ ,add_special_tokens=lowercase_ ) + [self.eos_token_id] )
if len(lowercase_ ) > self.model_max_length:
lowerCAmelCase__ : Dict = input_ids[-self.model_max_length :]
return input_ids
    def save_vocabulary( self : List[str] ,save_directory : str ,filename_prefix : Optional[str] = None ):
        index = 0
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
            emoji_file = os.path.join(
                save_directory ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''emoji_file'''] )
        else:
            vocab_file = (
                (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory + VOCAB_FILES_NAMES['''vocab_file''']
            )
            emoji_file = (
                (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory + VOCAB_FILES_NAMES['''emoji_file''']
            )
        with open(vocab_file ,'''w''' ,encoding='''utf-8''' ) as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
                        ''' Please check that the vocabulary is not corrupted!''' )
                    index = token_index
                writer.write(''','''.join(token ) + '''\n''' )
                index += 1
        with open(emoji_file ,'''w''' ,encoding='''utf-8''' ) as writer:
            json.dump(self.emoji ,writer )
        return vocab_file, emoji_file
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
    def __init__( self : str ,vocab : Optional[Any] ,ids_to_tokens : Dict ,emoji : Any ):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w ) for w in self.vocab.keys()] )
        self.content_repatter1 = re.compile(R'''(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)''' )
        self.content_repatter2 = re.compile(R'''[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*''' )
        self.content_repatter3 = re.compile(R'''[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}''' )
        self.content_repatter4 = re.compile(
            R'''([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*''' )
        self.content_repatter5 = re.compile(
            R'''(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*''' )
        self.content_repatter6 = re.compile(
            R'''((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*''' )
        keisen = '''─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'''
        blocks = '''▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'''
        self.content_trans1 = str.maketrans({k: '''<BLOCK>''' for k in keisen + blocks} )
def __len__( self : int ):
return len(self.ids_to_tokens )
    def clean_text( self : Optional[int] ,content : str ):
        content = self.content_repatter1.sub('''<URL>''' ,content )
        content = self.content_repatter2.sub('''<EMAIL>''' ,content )
        content = self.content_repatter3.sub('''<TEL>''' ,content )
        content = self.content_repatter4.sub('''<DATE>''' ,content )
        content = self.content_repatter5.sub('''<DATE>''' ,content )
        content = self.content_repatter6.sub('''<PRICE>''' ,content )
        content = content.translate(self.content_trans1 )
        while "<BLOCK><BLOCK>" in content:
            content = content.replace('''<BLOCK><BLOCK>''' ,'''<BLOCK>''' )
        return content
    def tokenize( self : List[str] ,text : str ,clean : bool=False ):
        text = text.replace(''' ''' ,'''<SP>''' )
        text = text.replace('''　''' ,'''<SP>''' )
        text = text.replace('''\r\n''' ,'''<BR>''' )
        text = text.replace('''\n''' ,'''<BR>''' )
        text = text.replace('''\r''' ,'''<BR>''' )
        text = text.replace('''\t''' ,'''<TAB>''' )
        text = text.replace('''—''' ,'''ー''' )
        text = text.replace('''−''' ,'''ー''' )
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k ,v )
        if clean:
            text = self.clean_text(text )
        def check_simbol(x : str ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 2:
                c = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0xC2_A1 and c <= 0xC2_BF)
or (c >= 0xC7_80 and c <= 0xC7_83)
or (c >= 0xCA_B9 and c <= 0xCB_BF)
or (c >= 0xCC_80 and c <= 0xCD_A2)
):
return True
return False
        def checkuae(x : str ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 3:
                c = (int(e[0] ) << 1_6) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0xE2_80_80 and c <= 0xE2_B0_7F:
return True
return False
        pos = 0
        result = []
        while pos < len(text ):
            end = min(len(text ) ,pos + self.maxlen + 1 ) if text[pos] == '''<''' else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end ,pos ,-1 ):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd ) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e) )
            if len(candidates ) > 0:
                # the smallest token_id is adopted
                _ ,wd ,e = sorted(candidates ,key=lambda x : x[0] )[0]
                result.append(wd )
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd ):
                    result.append('''<KIGOU>''' )
                elif checkuae(wd ):
                    result.append('''<U2000U2BFF>''' )
                else:
                    for i in wd.encode('''utf-8''' ):
                        result.append('''<|byte%d|>''' % i )
                pos = end
return result
    def convert_id_to_token( self : Union[str, Any] ,index : int ,breakline : str="\n" ):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2] ) )
        else:
            if len(byte_tokens ) > 0:
                words.append(bytearray(byte_tokens ).decode('''utf-8''' ,errors='''replace''' ) )
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji['''emoji_inv'''][word] )
            elif word == "<SP>":
                words.append(''' ''' )
            elif word == "<BR>":
                words.append(breakline )
            elif word == "<TAB>":
                words.append('''\t''' )
            elif word == "<BLOCK>":
                words.append('''▀''' )
            elif word == "<KIGOU>":
                words.append('''ǀ''' )
            elif word == "<U2000U2BFF>":
                words.append('''‖''' )
            else:
                words.append(word )
        if len(byte_tokens ) > 0:
            words.append(bytearray(byte_tokens ).decode('''utf-8''' ,errors='''replace''' ) )
        text = ''''''.join(words )
return text
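# The matching loop in `tokenize` above can be condensed to a longest-match-first
# sketch over a toy vocabulary (illustrative; the real loop additionally keeps
# the candidate with the smallest token id and falls back to symbol/byte tokens):
if __name__ == "__main__":

    def greedy_tokenize(text, vocab, maxlen=3):
        pos, result = 0, []
        while pos < len(text):
            for e in range(min(len(text), pos + maxlen), pos, -1):
                if text[pos:e] in vocab:
                    result.append(text[pos:e])
                    pos = e
                    break
            else:
                result.append(text[pos])  # single-character fallback
                pos += 1
        return result

    print(greedy_tokenize("abcab", {"ab", "abc", "c"}))  # ['abc', 'ab']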
| 106
|
"""simple docstring"""
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
__UpperCamelCase : Tuple = TypeVar('''T''')
class SCREAMING_SNAKE_CASE ( Generic[T] ):
"""simple docstring"""
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache
    def __init__( self : Dict ,n : int ):
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError('''n should be an integer greater than 0.''' )
        else:
            LRUCache._MAX_CAPACITY = n
    def refer( self : str ,x : T ):
        if x not in self.key_reference:
            if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element )
        else:
            self.dq_store.remove(x )
        self.dq_store.appendleft(x )
        self.key_reference.add(x )
    def display( self : int ):
        for k in self.dq_store:
            print(k )
def __repr__( self : Tuple ):
return F'LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}'
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCamelCase : LRUCache[str | int] = LRUCache(4)
lru_cache.refer('''A''')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('''A''')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
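    # For comparison, the same least-recently-used policy expressed with
    # collections.OrderedDict (an alternative to the deque + set pair above):
    from collections import OrderedDict

    ordered_store: OrderedDict = OrderedDict()
    for key in ["A", 2, 3, "A", 4, 5]:
        if key in ordered_store:
            ordered_store.move_to_end(key)        # mark as most recently used
        elif len(ordered_store) >= 4:
            ordered_store.popitem(last=False)     # evict the least recently used
        ordered_store[key] = None
    print(list(reversed(ordered_store)))  # [5, 4, 'A', 3] -- matches the cache above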
| 106
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class a__ ( unittest.TestCase ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = {
"task_specific_params": {
"summarization": {"length_penalty": 1.0, "max_length": 1_2_8, "min_length": 1_2, "num_beams": 4},
"summarization_cnn": {"length_penalty": 2.0, "max_length": 1_4_2, "min_length": 5_6, "num_beams": 4},
"summarization_xsum": {"length_penalty": 1.0, "max_length": 6_2, "min_length": 1_1, "num_beams": 6},
}
}
__lowerCAmelCase = {
"task_specific_params.summarization.length_penalty": 1.0,
"task_specific_params.summarization.max_length": 1_2_8,
"task_specific_params.summarization.min_length": 1_2,
"task_specific_params.summarization.num_beams": 4,
"task_specific_params.summarization_cnn.length_penalty": 2.0,
"task_specific_params.summarization_cnn.max_length": 1_4_2,
"task_specific_params.summarization_cnn.min_length": 5_6,
"task_specific_params.summarization_cnn.num_beams": 4,
"task_specific_params.summarization_xsum.length_penalty": 1.0,
"task_specific_params.summarization_xsum.max_length": 6_2,
"task_specific_params.summarization_xsum.min_length": 1_1,
"task_specific_params.summarization_xsum.num_beams": 6,
}
self.assertEqual(flatten_dict(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(_SCREAMING_SNAKE_CASE ) , x.transpose() ) )
__lowerCAmelCase = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(_SCREAMING_SNAKE_CASE , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = np.random.randn(3 , 4 )
__lowerCAmelCase = torch.tensor(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(transpose(_SCREAMING_SNAKE_CASE ) , transpose(_SCREAMING_SNAKE_CASE ).numpy() ) )
__lowerCAmelCase = np.random.randn(3 , 4 , 5 )
__lowerCAmelCase = torch.tensor(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(transpose(_SCREAMING_SNAKE_CASE , axes=(1, 2, 0) ) , transpose(_SCREAMING_SNAKE_CASE , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = np.random.randn(3 , 4 )
__lowerCAmelCase = tf.constant(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(transpose(_SCREAMING_SNAKE_CASE ) , transpose(_SCREAMING_SNAKE_CASE ).numpy() ) )
__lowerCAmelCase = np.random.randn(3 , 4 , 5 )
__lowerCAmelCase = tf.constant(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(transpose(_SCREAMING_SNAKE_CASE , axes=(1, 2, 0) ) , transpose(_SCREAMING_SNAKE_CASE , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = np.random.randn(3 , 4 )
__lowerCAmelCase = jnp.array(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(transpose(_SCREAMING_SNAKE_CASE ) , np.asarray(transpose(_SCREAMING_SNAKE_CASE ) ) ) )
__lowerCAmelCase = np.random.randn(3 , 4 , 5 )
__lowerCAmelCase = jnp.array(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(transpose(_SCREAMING_SNAKE_CASE , axes=(1, 2, 0) ) , np.asarray(transpose(_SCREAMING_SNAKE_CASE , axes=(1, 2, 0) ) ) ) )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(_SCREAMING_SNAKE_CASE , (4, 3) ) , np.reshape(_SCREAMING_SNAKE_CASE , (4, 3) ) ) )
__lowerCAmelCase = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(_SCREAMING_SNAKE_CASE , (1_2, 5) ) , np.reshape(_SCREAMING_SNAKE_CASE , (1_2, 5) ) ) )
@require_torch
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = np.random.randn(3 , 4 )
__lowerCAmelCase = torch.tensor(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(reshape(_SCREAMING_SNAKE_CASE , (4, 3) ) , reshape(_SCREAMING_SNAKE_CASE , (4, 3) ).numpy() ) )
__lowerCAmelCase = np.random.randn(3 , 4 , 5 )
__lowerCAmelCase = torch.tensor(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(reshape(_SCREAMING_SNAKE_CASE , (1_2, 5) ) , reshape(_SCREAMING_SNAKE_CASE , (1_2, 5) ).numpy() ) )
@require_tf
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = np.random.randn(3 , 4 )
__lowerCAmelCase = tf.constant(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(reshape(_SCREAMING_SNAKE_CASE , (4, 3) ) , reshape(_SCREAMING_SNAKE_CASE , (4, 3) ).numpy() ) )
__lowerCAmelCase = np.random.randn(3 , 4 , 5 )
__lowerCAmelCase = tf.constant(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(reshape(_SCREAMING_SNAKE_CASE , (1_2, 5) ) , reshape(_SCREAMING_SNAKE_CASE , (1_2, 5) ).numpy() ) )
@require_flax
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = np.random.randn(3 , 4 )
__lowerCAmelCase = jnp.array(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(reshape(_SCREAMING_SNAKE_CASE , (4, 3) ) , np.asarray(reshape(_SCREAMING_SNAKE_CASE , (4, 3) ) ) ) )
__lowerCAmelCase = np.random.randn(3 , 4 , 5 )
__lowerCAmelCase = jnp.array(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(reshape(_SCREAMING_SNAKE_CASE , (1_2, 5) ) , np.asarray(reshape(_SCREAMING_SNAKE_CASE , (1_2, 5) ) ) ) )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(_SCREAMING_SNAKE_CASE ) , np.squeeze(_SCREAMING_SNAKE_CASE ) ) )
__lowerCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(_SCREAMING_SNAKE_CASE , axis=2 ) , np.squeeze(_SCREAMING_SNAKE_CASE , axis=2 ) ) )
@require_torch
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = np.random.randn(1 , 3 , 4 )
__lowerCAmelCase = torch.tensor(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(squeeze(_SCREAMING_SNAKE_CASE ) , squeeze(_SCREAMING_SNAKE_CASE ).numpy() ) )
__lowerCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
__lowerCAmelCase = torch.tensor(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(squeeze(_SCREAMING_SNAKE_CASE , axis=2 ) , squeeze(_SCREAMING_SNAKE_CASE , axis=2 ).numpy() ) )
@require_tf
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = np.random.randn(1 , 3 , 4 )
__lowerCAmelCase = tf.constant(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(squeeze(_SCREAMING_SNAKE_CASE ) , squeeze(_SCREAMING_SNAKE_CASE ).numpy() ) )
__lowerCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
__lowerCAmelCase = tf.constant(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(squeeze(_SCREAMING_SNAKE_CASE , axis=2 ) , squeeze(_SCREAMING_SNAKE_CASE , axis=2 ).numpy() ) )
@require_flax
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = np.random.randn(1 , 3 , 4 )
__lowerCAmelCase = jnp.array(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(squeeze(_SCREAMING_SNAKE_CASE ) , np.asarray(squeeze(_SCREAMING_SNAKE_CASE ) ) ) )
__lowerCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
__lowerCAmelCase = jnp.array(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(squeeze(_SCREAMING_SNAKE_CASE , axis=2 ) , np.asarray(squeeze(_SCREAMING_SNAKE_CASE , axis=2 ) ) ) )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(_SCREAMING_SNAKE_CASE , axis=1 ) , np.expand_dims(_SCREAMING_SNAKE_CASE , axis=1 ) ) )
@require_torch
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = np.random.randn(3 , 4 )
__lowerCAmelCase = torch.tensor(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(expand_dims(_SCREAMING_SNAKE_CASE , axis=1 ) , expand_dims(_SCREAMING_SNAKE_CASE , axis=1 ).numpy() ) )
@require_tf
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = np.random.randn(3 , 4 )
__lowerCAmelCase = tf.constant(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(expand_dims(_SCREAMING_SNAKE_CASE , axis=1 ) , expand_dims(_SCREAMING_SNAKE_CASE , axis=1 ).numpy() ) )
@require_flax
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = np.random.randn(3 , 4 )
__lowerCAmelCase = jnp.array(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(expand_dims(_SCREAMING_SNAKE_CASE , axis=1 ) , np.asarray(expand_dims(_SCREAMING_SNAKE_CASE , axis=1 ) ) ) )
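# A minimal sketch of how such framework-agnostic helpers are typically
# dispatched: inspect the tensor's module and route to the matching backend
# (illustrative; the real transformers.utils versions cover more cases):
if __name__ == "__main__":

    def generic_transpose(array, axes=None):
        if isinstance(array, np.ndarray):
            return np.transpose(array, axes=axes)
        module = type(array).__module__
        if module.startswith("torch"):
            return torch.permute(array, axes) if axes is not None else array.t()
        if module.startswith("tensorflow"):
            return tf.transpose(array, perm=axes)
        raise TypeError(f"unsupported tensor type: {type(array)}")

    print(generic_transpose(np.random.randn(3, 4)).shape)  # (4, 3)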
| 368
|
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class a__ :
def __init__( self , _A , _A=1_3 , _A=3_0 , _A=2 , _A=3 , _A=True , _A=True , _A=3_2 , _A=2 , _A=4 , _A=3_7 , _A="gelu" , _A=0.1 , _A=0.1 , _A=1_0 , _A=0.02 , _A=3 , _A=None , _A=2 , ):
"""simple docstring"""
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = image_size
__lowerCAmelCase = patch_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = is_training
__lowerCAmelCase = use_labels
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = type_sequence_label_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = scope
__lowerCAmelCase = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
__lowerCAmelCase = (image_size // patch_size) ** 2
__lowerCAmelCase = num_patches + 2
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = TFDeiTModel(config=_A )
__lowerCAmelCase = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = TFDeiTForMaskedImageModeling(config=_A )
__lowerCAmelCase = model(_A )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__lowerCAmelCase = 1
__lowerCAmelCase = TFDeiTForMaskedImageModeling(_A )
__lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowerCAmelCase = model(_A )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = self.type_sequence_label_size
__lowerCAmelCase = TFDeiTForImageClassification(_A )
__lowerCAmelCase = model(_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__lowerCAmelCase = 1
__lowerCAmelCase = TFDeiTForImageClassification(_A )
__lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowerCAmelCase = model(_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = config_and_inputs
__lowerCAmelCase = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class a__ ( snake_case__ , snake_case__ , unittest.TestCase ):
_a : Optional[Any] = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
_a : Optional[Any] = (
{
"""feature-extraction""": TFDeiTModel,
"""image-classification""": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
_a : str = False
_a : str = False
_a : List[str] = False
_a : Optional[int] = False
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
        self.model_tester = TFDeiTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DeiTConfig , has_text_modality=False , hidden_size=3_7 )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(_A )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
__lowerCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_A , tf.keras.layers.Dense ) )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(_A )
__lowerCAmelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A=False ):
"""simple docstring"""
__lowerCAmelCase = super()._prepare_for_class(_A , _A , return_labels=_A )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = TFDeiTModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def prepare_img():
__lowerCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class a__ ( unittest.TestCase ):
@cached_property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="tf" )
        # forward pass
        outputs = model(**inputs )
        # verify the logits
        expected_shape = tf.TensorShape((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-1.02_66, 0.19_12, -1.28_61] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
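# A condensed inference sketch mirroring the integration test above (the model
# id is real; treat this as illustrative usage, not part of the test suite):
if __name__ == "__main__":
    sketch_processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
    sketch_model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")
    sketch_inputs = sketch_processor(images=prepare_img(), return_tensors="tf")
    sketch_logits = sketch_model(**sketch_inputs).logits
    print(int(tf.argmax(sketch_logits, axis=-1)[0]))  # predicted ImageNet class id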
| 102
| 0
|
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class __lowercase ( A__ ):
"""simple docstring"""
    def __init__( self : str , params : Dict , data : Optional[Any]):
        self.params = params
        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self : Union[str, Any] , lowerCAmelCase__ : Optional[Any]):
return (self.token_ids[index], self.lengths[index])
def __len__( self : str):
return len(self.lengths)
    def check( self : List[str]):
assert len(self.token_ids) == len(self.lengths)
assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))
    def remove_long_sequences( self : Optional[Any]):
        max_len = self.params.max_model_input_size
        idxs = self.lengths > max_len
        logger.info(F"Splitting {sum(idxs)} too long sequences.")
        def divide_chunks(l : Optional[int] , n : Optional[int]):
            return [l[i : i + n] for i in range(0 , len(l) , n)]
        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""]
        else:
            cls_id, sep_id = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""]
        for seq_, len_ in zip(self.token_ids , self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_ , max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s , 0 , cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s , len(sub_s) , sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)
                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])
        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)
    def remove_empty_sequences( self : Optional[int]):
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(F"Remove {init_size - new_size} too short (<=11 tokens) sequences.")
    def remove_unknown_sequences( self : str):
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["""unk_token"""]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(F"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")
    def print_statistics( self : int):
if not self.params.is_master:
return
logger.info(F"{len(self)} sequences")
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
    def batch_sequences( self : Optional[Any] , batch : Optional[int]):
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)
        # Max for paddings
        max_seq_len_ = max(lengths)
        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["""pad_token"""]
        else:
            pad_idx = self.params.special_tok_ids["""unk_token"""]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)
        tk_t = torch.tensor(tk_) # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths) # (bs)
        return tk_t, lg_t
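# A tiny, self-contained illustration of the padding rule in `batch_sequences`
# above (the token ids are made up):
if __name__ == "__main__":
    _batch = [(np.array([5, 6, 7]), 3), (np.array([8, 9]), 2)]
    _pad_idx = 0
    _max_len = max(length for _, length in _batch)
    _padded = [list(seq.astype(int)) + [_pad_idx] * (_max_len - len(seq)) for seq, _ in _batch]
    print(torch.tensor(_padded))  # tensor([[5, 6, 7], [8, 9, 0]])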
| 13
|
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
_lowercase : Any =logging.get_logger(__name__)
@add_end_docstrings(A__ )
class snake_case__ (A__ ):
"""simple docstring"""
def __init__( self , *__lowercase , **__lowercase ) -> int:
"""simple docstring"""
super().__init__(*__lowercase , **__lowercase )
requires_backends(self , """vision""" )
self.check_model_type(__lowercase )
def __call__( self , __lowercase , **__lowercase ) -> Union[str, Any]:
"""simple docstring"""
return super().__call__(__lowercase , **__lowercase )
def SCREAMING_SNAKE_CASE__( self , **__lowercase ) -> Optional[Any]:
"""simple docstring"""
return {}, {}, {}
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> Union[str, Any]:
"""simple docstring"""
a__ : Optional[Any] = load_image(__lowercase )
a__ : Optional[int] = image.size
a__ : int = self.image_processor(images=__lowercase , return_tensors=self.framework )
return model_inputs
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> Union[str, Any]:
"""simple docstring"""
a__ : str = self.model(**__lowercase )
return model_outputs
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> str:
"""simple docstring"""
a__ : Optional[int] = model_outputs.predicted_depth
a__ : Any = torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode="""bicubic""" , align_corners=__lowercase )
a__ : Union[str, Any] = prediction.squeeze().cpu().numpy()
a__ : List[str] = (output * 2_5_5 / np.max(__lowercase )).astype("""uint8""" )
a__ : Any = Image.fromarray(__lowercase )
a__ : List[str] = {}
a__ : Tuple = predicted_depth
a__ : Any = depth
return output_dict
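# Typical usage of this pipeline through the high-level API (the image URL is
# illustrative):
if __name__ == "__main__":
    from transformers import pipeline

    depth_estimator = pipeline("depth-estimation")
    result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
    result["depth"].save("depth.png")  # PIL image; result["predicted_depth"] holds the raw tensor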
| 170
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SCUT-DLVCLab/lilt-roberta-en-base": (
        "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
    ),
}
class LiltConfig(PretrainedConfig):
    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
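

# --- Illustration (not part of the original file) ---
# Minimal sketch of instantiating this config; the overridden values are
# arbitrary examples, not recommended settings.
if __name__ == "__main__":
    config = LiltConfig(channel_shrink_ratio=2, max_2d_position_embeddings=512)
    print(config.hidden_size)                 # 768 (default)
    print(config.max_2d_position_embeddings)  # 512 (overridden)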
| 364
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22InpaintPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22InpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 122
| 0
|
'''simple docstring'''
def catalan(number: int) -> int:
    """
    Return the `number`-th Catalan number, built from the recurrence
    C(n) = C(n - 1) * (4n - 2) / (n + 1).

    >>> catalan(5)
    14
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)

    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)

    current_number = 1

    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1

    return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
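

# --- Illustration (not part of the original file) ---
# The first few values produced by the recurrence above:
if __name__ == "__main__":
    print([catalan(n) for n in range(1, 8)])  # [1, 1, 2, 5, 14, 42, 132]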
| 139
|
'''simple docstring'''
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig(SageMakerConfig):
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''False''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
    fail_training_script_args = [
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''--do_test''',
'''False''',
'''--do_predict''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
class PassedArgumentsTest(unittest.TestCase):
    def test_args_convert(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
| 139
| 1
|
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be a integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")

    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits

    for i in range(counter):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(
        f"Total count for quantum fourier transform state is: \n {quantum_fourier_transform(3)}"
    )
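

# --- Illustration (not part of the original file) ---
# Starting from |00...0>, the QFT yields a uniform superposition, so the counts
# should spread roughly evenly over all 2**n bitstrings (about 1250 per outcome
# for 3 qubits and 10000 shots). A quick sanity check (requires qiskit + Aer):
if __name__ == "__main__":
    counts = quantum_fourier_transform(2)
    assert sum(counts.values()) == 10000
    assert all(abs(v - 2500) < 500 for v in counts.values())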
| 363
|
'''simple docstring'''
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    return x.sum()


def add_one(i):  # picklable for multiprocessing
    return i + 1


@dataclass
class A:
    x: int
    y: str
class PyUtilsTest(TestCase):
    def test_map_nested(self):
        s1 = {}
        s2 = []
        s3 = 1
        s4 = [1, 2]
        s5 = {"a": 1, "b": 2}
        s6 = {"a": [1, 2], "b": [3, 4]}
        s7 = {"a": {"1": 1}, "b": 2}
        s8 = {"a": 1, "b": 2, "c": 3, "d": 4}
        expected_map_nested_s1 = {}
        expected_map_nested_s2 = []
        expected_map_nested_s3 = 2
        expected_map_nested_s4 = [2, 3]
        expected_map_nested_s5 = {"a": 2, "b": 3}
        expected_map_nested_s6 = {"a": [2, 3], "b": [4, 5]}
        expected_map_nested_s7 = {"a": {"1": 2}, "b": 3}
        expected_map_nested_s8 = {"a": 2, "b": 3, "c": 4, "d": 5}
        self.assertEqual(map_nested(add_one, s1), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8), expected_map_nested_s8)

        num_proc = 2
        self.assertEqual(map_nested(add_one, s1, num_proc=num_proc), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2, num_proc=num_proc), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3, num_proc=num_proc), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4, num_proc=num_proc), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5, num_proc=num_proc), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6, num_proc=num_proc), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7, num_proc=num_proc), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8, num_proc=num_proc), expected_map_nested_s8)

        sn = {"a": np.eye(2), "b": np.zeros(3), "c": np.ones(2)}
        expected_map_nested_sn_sum = {"a": 2, "b": 0, "c": 2}
        expected_map_nested_sn_int = {
            "a": np.eye(2).astype(int),
            "b": np.zeros(3).astype(int),
            "c": np.ones(2).astype(int),
        }
        self.assertEqual(map_nested(np_sum, sn, map_numpy=False), expected_map_nested_sn_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn, map_numpy=True).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn_int.items()},
        )
        self.assertEqual(map_nested(np_sum, sn, map_numpy=False, num_proc=num_proc), expected_map_nested_sn_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn, map_numpy=True, num_proc=num_proc).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn_int.items()},
        )
        with self.assertRaises(AttributeError):  # can't pickle a local lambda
            map_nested(lambda x: x + 1, sn, num_proc=num_proc)
    def test_zip_dict(self):
        d1 = {"a": 1, "b": 2}
        d2 = {"a": 3, "b": 4}
        d3 = {"a": 5, "b": 6}
        expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip_dict_result)

    def test_temporary_assignment(self):
        class Foo:
            my_attr = "bar"

        foo = Foo()
        self.assertEqual(foo.my_attr, "bar")
        with temporary_assignment(foo, "my_attr", "BAR"):
            self.assertEqual(foo.my_attr, "BAR")
        self.assertEqual(foo.my_attr, "bar")
@pytest.mark.parametrize(
"""iterable_length, num_proc, expected_num_proc""" , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool"
    ) as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest(TestCase):
    @require_tf
    def test_temp_seed_tf(self):
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    @require_torch
    def test_temp_seed_torch(self):
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    def test_temp_seed_numpy(self):
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@pytest.mark.parametrize("input_data", [{}])
def test_nested_data_structure_data(input_data):
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data
@pytest.mark.parametrize(
"""data, expected_output""" , [
({}, []),
([], []),
("""foo""", ["""foo"""]),
(["""foo""", """bar"""], ["""foo""", """bar"""]),
([["""foo""", """bar"""]], ["""foo""", """bar"""]),
([[["""foo"""], ["""bar"""]]], ["""foo""", """bar"""]),
([[["""foo"""], """bar"""]], ["""foo""", """bar"""]),
({"""a""": 1, """b""": 2}, [1, 2]),
({"""a""": [1, 2], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[1, 2]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
({"""a""": [[1, 2]], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [[[3], [4]]]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [3, [4]]}, [1, 2, 3, 4]),
({"""a""": {"""1""": 1}, """b""": 2}, [1, 2]),
({"""a""": {"""1""": [1]}, """b""": 2}, [1, 2]),
({"""a""": {"""1""": [1]}, """b""": [2]}, [1, 2]),
] , )
def test_flatten(data, expected_output):
    output = NestedDataStructure(data).flatten()
    assert output == expected_output
def test_asdict():
    input = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input) == expected_output

    input = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input) == expected_output

    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])
def _split_text(text: str):
    return text.split()


def _2seconds_generator_of_2items_with_timing(content):
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)


def test_iflatmap_unordered():
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
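

# --- Illustration (not part of the original file) ---
# What `map_nested` does in the simplest case: apply a function to every leaf
# of a nested structure while preserving that structure.
if __name__ == "__main__":
    print(map_nested(lambda x: x * 2, {"a": [1, 2], "b": 3}))  # {'a': [2, 4], 'b': 6}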
| 183
| 0
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
('text_embeddings.word_embeddings.weight', 'vilt.embeddings.text_embeddings.word_embeddings.weight'),
(
'text_embeddings.position_embeddings.weight',
'vilt.embeddings.text_embeddings.position_embeddings.weight',
),
('text_embeddings.position_ids', 'vilt.embeddings.text_embeddings.position_ids'),
(
'text_embeddings.token_type_embeddings.weight',
'vilt.embeddings.text_embeddings.token_type_embeddings.weight',
),
('text_embeddings.LayerNorm.weight', 'vilt.embeddings.text_embeddings.LayerNorm.weight'),
('text_embeddings.LayerNorm.bias', 'vilt.embeddings.text_embeddings.LayerNorm.bias'),
# patch embeddings
('transformer.cls_token', 'vilt.embeddings.cls_token'),
('transformer.patch_embed.proj.weight', 'vilt.embeddings.patch_embeddings.projection.weight'),
('transformer.patch_embed.proj.bias', 'vilt.embeddings.patch_embeddings.projection.bias'),
('transformer.pos_embed', 'vilt.embeddings.position_embeddings'),
# token type embeddings
('token_type_embeddings.weight', 'vilt.embeddings.token_type_embeddings.weight'),
] )
# final layernorm + pooler
rename_keys.extend(
[
('transformer.norm.weight', 'vilt.layernorm.weight'),
('transformer.norm.bias', 'vilt.layernorm.bias'),
('pooler.dense.weight', 'vilt.pooler.dense.weight'),
('pooler.dense.bias', 'vilt.pooler.dense.bias'),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('vqa_classifier.0.weight', 'classifier.0.weight'),
('vqa_classifier.0.bias', 'classifier.0.bias'),
('vqa_classifier.1.weight', 'classifier.1.weight'),
('vqa_classifier.1.bias', 'classifier.1.bias'),
('vqa_classifier.3.weight', 'classifier.3.weight'),
('vqa_classifier.3.bias', 'classifier.3.bias'),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('nlvr2_classifier.0.weight', 'classifier.0.weight'),
('nlvr2_classifier.0.bias', 'classifier.0.bias'),
('nlvr2_classifier.1.weight', 'classifier.1.weight'),
('nlvr2_classifier.1.bias', 'classifier.1.bias'),
('nlvr2_classifier.3.weight', 'classifier.3.weight'),
('nlvr2_classifier.3.bias', 'classifier.3.bias'),
] )
else:
pass
return rename_keys
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original checkpoint's weights to our ViLT structure.
    """
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)

    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)

    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)

    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model and processor to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
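

# --- Illustration (not part of the original file) ---
# Example invocation (the dump folder name is a placeholder), followed by
# loading the converted checkpoint back; a sketch assuming the conversion ran:
#
#   python convert_vilt_original_to_pytorch.py \
#       --checkpoint_url https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt \
#       --pytorch_dump_folder_path ./vilt-mlm-itm
#
#   from transformers import ViltProcessor, ViltForMaskedLM
#   processor = ViltProcessor.from_pretrained("./vilt-mlm-itm")
#   model = ViltForMaskedLM.from_pretrained("./vilt-mlm-itm")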
| 119
|
'''simple docstring'''
import torch
from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3

    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
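

# --- Illustration (not part of the original file) ---
# The denoising-loop pattern the tests above exercise, in plain form. The zero
# "noise prediction" is a stand-in for a real UNet call, purely for the demo.
if __name__ == "__main__":
    scheduler = KDPM2DiscreteScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(10)

    sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = torch.zeros_like(model_input)  # stand-in for model(model_input, t)
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    print(sample.shape)  # torch.Size([1, 3, 8, 8])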
| 168
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 365
|
MOD_ADLER = 65521


def adler32(plain_text: str) -> int:
    """
    Function implements adler-32 hash.
    Iterates and evaluates a new value for each character.

    >>> adler32("Wikipedia")
    300286872
    """
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
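

# --- Illustration (not part of the original file) ---
# Cross-check against the reference Adler-32 in the standard library:
if __name__ == "__main__":
    import zlib

    assert adler32("Wikipedia") == zlib.adler32(b"Wikipedia") == 300286872
    print("adler32 matches zlib")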
| 277
| 0
|
"""simple docstring"""
def split(string: str, separator: str = " ") -> list:
    """
    Split `string` on every occurrence of `separator` (a space by default).
    """
    split_words = []

    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])
    return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
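

# --- Illustration (not part of the original file) ---
# Unlike str.split, a trailing separator does not yield a final empty string:
if __name__ == "__main__":
    print(split("apple#banana#cherry", "#"))  # ['apple', 'banana', 'cherry']
    print(split("a,b,", ","))                 # ['a', 'b']  (str.split gives ['a', 'b', ''])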
| 16
|
"""simple docstring"""
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Suppress repeated SQLAlchemy 2.0 deprecation warnings
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
| 0
|
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 365
|
"""simple docstring"""
import argparse
import os
import re
PATH_TO_TRANSFORMERS = "src/transformers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def get_indent(line: str) -> str:
    """Return the indent of `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """
    Split some code into its indented blocks, starting at a given level.
    """
    # Let's split the code into lines and move to start_index.
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks
def ignore_underscore(key):
    """Wrap a `key` function (mapping an object to a string) to lower-case and remove underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """Sort a list of `objects` following the rules of isort; `key` optionally maps an object to a str."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def sort_objects_in_import(import_statement: str) -> str:
    """
    Return the same `import_statement` but with objects properly sorted.
    """

    # This inner function sort imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]

        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file, check_only=True):
    """
    Sort `_import_structure` imports in `file`; `check_only` determines whether we only check or overwrite.
    """
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
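
# --- Illustration (not part of the original file) ---
# The isort-style ordering `sort_objects` implements: constants first, then
# classes, then functions, each group alphabetized ignoring underscores, e.g.:
#
#   sort_objects(["load_model", "MyClass", "DEFAULT_SIZE", "AutoModel", "helper"])
#   # -> ['DEFAULT_SIZE', 'AutoModel', 'MyClass', 'helper', 'load_model']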
| 27
| 0
|
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
    MobileViTV2Config,
    MobileViTV2ForImageClassification,
    MobileViTV2ForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_orig_config_file(orig_cfg_file):
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)

            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file, str(exc)))
    return config
def get_mobilevitv2_config(task_name, orig_cfg_file):
    config = MobileViTV2Config()

    is_segmentation_model = False

    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase : Optional[Any] = dct.pop(a__ )
lowerCamelCase : Tuple = val
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ):
'''simple docstring'''
if base_model:
lowerCamelCase : Optional[int] = ""
else:
lowerCamelCase : List[str] = "mobilevitv2."
lowerCamelCase : int = []
for k in state_dict.keys():
if k[:8] == "encoder.":
lowerCamelCase : Any = k[8:]
else:
lowerCamelCase : List[str] = k
if ".block." in k:
lowerCamelCase : int = k_new.replace(".block." , "." )
if ".conv." in k:
lowerCamelCase : str = k_new.replace(".conv." , ".convolution." )
if ".norm." in k:
lowerCamelCase : List[Any] = k_new.replace(".norm." , ".normalization." )
if "conv_1." in k:
lowerCamelCase : int = k_new.replace("conv_1." , f"""{model_prefix}conv_stem.""" )
for i in [1, 2]:
if f"""layer_{i}.""" in k:
lowerCamelCase : Optional[int] = k_new.replace(f"""layer_{i}.""" , f"""{model_prefix}encoder.layer.{i-1}.layer.""" )
if ".exp_1x1." in k:
lowerCamelCase : Union[str, Any] = k_new.replace(".exp_1x1." , ".expand_1x1." )
if ".red_1x1." in k:
lowerCamelCase : Union[str, Any] = k_new.replace(".red_1x1." , ".reduce_1x1." )
for i in [3, 4, 5]:
if f"""layer_{i}.0.""" in k:
lowerCamelCase : Optional[int] = k_new.replace(f"""layer_{i}.0.""" , f"""{model_prefix}encoder.layer.{i-1}.downsampling_layer.""" )
if f"""layer_{i}.1.local_rep.0.""" in k:
lowerCamelCase : Optional[int] = k_new.replace(f"""layer_{i}.1.local_rep.0.""" , f"""{model_prefix}encoder.layer.{i-1}.conv_kxk.""" )
if f"""layer_{i}.1.local_rep.1.""" in k:
lowerCamelCase : Any = k_new.replace(f"""layer_{i}.1.local_rep.1.""" , f"""{model_prefix}encoder.layer.{i-1}.conv_1x1.""" )
for i in [3, 4, 5]:
if i == 3:
lowerCamelCase : List[str] = [0, 1]
elif i == 4:
lowerCamelCase : List[Any] = [0, 1, 2, 3]
elif i == 5:
lowerCamelCase : Dict = [0, 1, 2]
for j in j_in:
if f"""layer_{i}.1.global_rep.{j}.""" in k:
lowerCamelCase : Optional[int] = k_new.replace(
f"""layer_{i}.1.global_rep.{j}.""" , f"""{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.""" )
if f"""layer_{i}.1.global_rep.{j+1}.""" in k:
lowerCamelCase : Tuple = k_new.replace(
f"""layer_{i}.1.global_rep.{j+1}.""" , f"""{model_prefix}encoder.layer.{i-1}.layernorm.""" )
if f"""layer_{i}.1.conv_proj.""" in k:
lowerCamelCase : Optional[Any] = k_new.replace(f"""layer_{i}.1.conv_proj.""" , f"""{model_prefix}encoder.layer.{i-1}.conv_projection.""" )
if "pre_norm_attn.0." in k:
lowerCamelCase : List[Any] = k_new.replace("pre_norm_attn.0." , "layernorm_before." )
if "pre_norm_attn.1." in k:
lowerCamelCase : Tuple = k_new.replace("pre_norm_attn.1." , "attention." )
if "pre_norm_ffn.0." in k:
lowerCamelCase : Optional[int] = k_new.replace("pre_norm_ffn.0." , "layernorm_after." )
if "pre_norm_ffn.1." in k:
lowerCamelCase : Optional[int] = k_new.replace("pre_norm_ffn.1." , "ffn.conv1." )
if "pre_norm_ffn.3." in k:
lowerCamelCase : Union[str, Any] = k_new.replace("pre_norm_ffn.3." , "ffn.conv2." )
if "classifier.1." in k:
lowerCamelCase : str = k_new.replace("classifier.1." , "classifier." )
if "seg_head." in k:
lowerCamelCase : Any = k_new.replace("seg_head." , "segmentation_head." )
if ".aspp_layer." in k:
lowerCamelCase : str = k_new.replace(".aspp_layer." , "." )
if ".aspp_pool." in k:
lowerCamelCase : List[str] = k_new.replace(".aspp_pool." , "." )
rename_keys.append((k, k_new) )
return rename_keys
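# A small sketch of how the (old_key, new_key) pairs produced above get applied:
# each entry is popped from the checkpoint and re-inserted under its Hugging Face
# name, exactly what rename_key() does. The toy state dict below is hypothetical.
_toy_state_dict = {"conv_1.convolution.weight": "tensor-placeholder"}
for _old, _new in [("conv_1.convolution.weight", "mobilevitv2.conv_stem.convolution.weight")]:
    _toy_state_dict[_new] = _toy_state_dict.pop(_old)
assert "mobilevitv2.conv_stem.convolution.weight" in _toy_state_dict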
def lowercase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase : Dict = []
for k in state_dict.keys():
if k.startswith("seg_head.aux_head." ):
keys_to_ignore.append(a__ )
for k in keys_to_ignore:
state_dict.pop(a__ , a__ )
def lowercase_( ):
'''simple docstring'''
lowerCamelCase : Any = "http://images.cocodataset.org/val2017/000000039769.jpg"
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
lowerCamelCase : Optional[int] = Image.open(requests.get(a__ , stream=a__ ).raw )
return im
@torch.no_grad()
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase : List[Any] = get_mobilevitva_config(a__ , a__ )
# load original state_dict
lowerCamelCase : Union[str, Any] = torch.load(a__ , map_location="cpu" )
# load huggingface model
if task_name.startswith("ade20k_" ) or task_name.startswith("voc_" ):
lowerCamelCase : str = MobileViTVaForSemanticSegmentation(a__ ).eval()
lowerCamelCase : Any = False
else:
lowerCamelCase : Dict = MobileViTVaForImageClassification(a__ ).eval()
lowerCamelCase : Optional[Any] = False
    # remove and rename some keys of the loaded original state dict
lowerCamelCase : Tuple = checkpoint
remove_unused_keys(a__ )
lowerCamelCase : str = create_rename_keys(a__ , base_model=a__ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(a__ , a__ , a__ )
# load modified state_dict
model.load_state_dict(a__ )
# Check outputs on an image, prepared by MobileViTImageProcessor
lowerCamelCase : List[Any] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
lowerCamelCase : str = image_processor(images=prepare_img() , return_tensors="pt" )
lowerCamelCase : Optional[int] = model(**a__ )
# verify classification model
if task_name.startswith("imagenet" ):
lowerCamelCase : Any = outputs.logits
lowerCamelCase : Optional[int] = logits.argmax(-1 ).item()
print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
if task_name.startswith("imagenet1k_256" ) and config.width_multiplier == 1.0:
# expected_logits for base variant
lowerCamelCase : Optional[int] = torch.tensor([-1.63_36E00, -7.32_04E-02, -5.18_83E-01] )
assert torch.allclose(logits[0, :3] , a__ , atol=1E-4 )
Path(a__ ).mkdir(exist_ok=a__ )
print(f"""Saving model {task_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(a__ )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(a__ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''',
default='''imagenet1k_256''',
type=str,
help=(
        '''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on. '''
'''
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
'''
),
choices=[
'''imagenet1k_256''',
'''imagenet1k_384''',
'''imagenet21k_to_1k_256''',
'''imagenet21k_to_1k_384''',
'''ade20k_deeplabv3''',
'''voc_deeplabv3''',
],
)
parser.add_argument(
'''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
_snake_case = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
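# Example invocation (the script filename and all paths are placeholders; the
# checkpoint and config must come from the original MobileViTV2 release):
#
#   python convert_mobilevitv2_to_pytorch.py \
#       --task imagenet1k_256 \
#       --orig_checkpoint_path ./mobilevitv2-1.0.pt \
#       --orig_config_path ./mobilevitv2-1.0.yaml \
#       --pytorch_dump_folder_path ./mobilevitv2-1.0-hf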
| 283
|
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowercase_ ( PipelineTesterMixin , unittest.TestCase ):
A__ : Any = DanceDiffusionPipeline
A__ : Any = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
A__ : List[Any] = PipelineTesterMixin.required_optional_params - {
"""callback""",
"""latents""",
"""callback_steps""",
"""output_type""",
"""num_images_per_prompt""",
}
A__ : Dict = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
A__ : str = False
A__ : Any = False
def lowerCamelCase_ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase_ = UNetaDModel(
block_out_channels=(3_2, 3_2, 6_4) , extra_in_channels=1_6 , sample_size=5_1_2 , sample_rate=1_6_0_0_0 , in_channels=2 , out_channels=2 , flip_sin_to_cos=__UpperCamelCase , use_timestep_embedding=__UpperCamelCase , time_embedding_type="""fourier""" , mid_block_type="""UNetMidBlock1D""" , down_block_types=("""DownBlock1DNoSkip""", """DownBlock1D""", """AttnDownBlock1D""") , up_block_types=("""AttnUpBlock1D""", """UpBlock1D""", """UpBlock1DNoSkip""") , )
UpperCamelCase_ = IPNDMScheduler()
UpperCamelCase_ = {
"""unet""": unet,
"""scheduler""": scheduler,
}
return components
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase=0 ):
"""simple docstring"""
if str(__UpperCamelCase ).startswith("""mps""" ):
UpperCamelCase_ = torch.manual_seed(__UpperCamelCase )
else:
UpperCamelCase_ = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
UpperCamelCase_ = {
"""batch_size""": 1,
"""generator""": generator,
"""num_inference_steps""": 4,
}
return inputs
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase_ = self.get_dummy_components()
UpperCamelCase_ = DanceDiffusionPipeline(**__UpperCamelCase )
UpperCamelCase_ = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCamelCase_ = self.get_dummy_inputs(__UpperCamelCase )
UpperCamelCase_ = pipe(**__UpperCamelCase )
UpperCamelCase_ = output.audios
UpperCamelCase_ = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
UpperCamelCase_ = np.array([-0.7_265, 1.0_000, -0.8_388, 0.1_175, 0.9_498, -1.0_000] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def lowerCamelCase_ ( self ):
"""simple docstring"""
return super().test_save_load_local()
@skip_mps
def lowerCamelCase_ ( self ):
"""simple docstring"""
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
@skip_mps
def lowerCamelCase_ ( self ):
"""simple docstring"""
return super().test_save_load_optional_components()
@skip_mps
def lowerCamelCase_ ( self ):
"""simple docstring"""
return super().test_attention_slicing_forward_pass()
def lowerCamelCase_ ( self ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowercase_ ( unittest.TestCase ):
def lowerCamelCase_ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = torch_device
UpperCamelCase_ = DanceDiffusionPipeline.from_pretrained("""harmonai/maestro-150k""" )
UpperCamelCase_ = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCamelCase_ = torch.manual_seed(0 )
UpperCamelCase_ = pipe(generator=__UpperCamelCase , num_inference_steps=1_0_0 , audio_length_in_s=4.096 )
UpperCamelCase_ = output.audios
UpperCamelCase_ = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
UpperCamelCase_ = np.array([-0.0_192, -0.0_231, -0.0_318, -0.0_059, 0.0_002, -0.0_020] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = torch_device
UpperCamelCase_ = DanceDiffusionPipeline.from_pretrained("""harmonai/maestro-150k""" , torch_dtype=torch.floataa )
UpperCamelCase_ = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCamelCase_ = torch.manual_seed(0 )
UpperCamelCase_ = pipe(generator=__UpperCamelCase , num_inference_steps=1_0_0 , audio_length_in_s=4.096 )
UpperCamelCase_ = output.audios
UpperCamelCase_ = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
UpperCamelCase_ = np.array([-0.0_367, -0.0_488, -0.0_771, -0.0_525, -0.0_444, -0.0_341] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
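# A minimal end-to-end usage sketch for the pipeline exercised above. It assumes a
# CUDA device (as the slow tests do) and downloads the harmonai/maestro-150k
# weights, so it is guarded behind __main__.
if __name__ == "__main__":
    pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k").to("cuda")
    sample = pipe(generator=torch.manual_seed(0), num_inference_steps=100, audio_length_in_s=4.096)
    print(sample.audios[0].shape)  # (2, num_samples) numpy array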
| 122
| 0
|
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
__A : str = True
except (ImportError, ModuleNotFoundError):
__A : Optional[int] = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def __SCREAMING_SNAKE_CASE ( lowercase__ ):
"""simple docstring"""
re.sub("<n>" , "" , lowercase__ ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(lowercase__ ) )
| 57
|
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
__A : Any = imread(R'digital_image_processing/image_data/lena_small.jpg')
__A : Tuple = cvtColor(img, COLOR_BGR2GRAY)
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
A = cn.convert_to_negative(lowercase__ )
# assert negative_img array for at least one True
assert negative_img.any()
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
with Image.open("digital_image_processing/image_data/lena_small.jpg" ) as img:
# Work around assertion for response
assert str(cc.change_contrast(lowercase__ , 110 ) ).startswith(
"<PIL.Image.Image image mode=RGB size=100x100 at" )
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
A = canny.gen_gaussian_kernel(9 , sigma=1.4 )
    # every entry of a Gaussian kernel is positive, so the array is all-True
assert resp.all()
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
A = imread("digital_image_processing/image_data/lena_small.jpg" , 0 )
    # assert the grayscale image loaded as a non-empty, all-nonzero array
assert canny_img.all()
A = canny.canny(lowercase__ )
# assert canny array for at least one True
assert canny_array.any()
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
assert gg.gaussian_filter(lowercase__ , 5 , sigma=0.9 ).all()
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
# laplace diagonals
A = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
    A = conv.img_convolve(lowercase__ , lowercase__ ).astype(uinta )
assert res.any()
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
assert med.median_filter(lowercase__ , 3 ).any()
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
A , A = sob.sobel_filter(lowercase__ )
assert grad.any() and theta.any()
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
A = sp.make_sepia(lowercase__ , 20 )
assert sepia.all()
def __SCREAMING_SNAKE_CASE ( lowercase__ = "digital_image_processing/image_data/lena_small.jpg" ):
"""simple docstring"""
A = bs.Burkes(imread(lowercase__ , 1 ) , 120 )
burkes.process()
assert burkes.output_img.any()
def __SCREAMING_SNAKE_CASE ( lowercase__ = "digital_image_processing/image_data/lena_small.jpg" , ):
"""simple docstring"""
A = rs.NearestNeighbour(imread(lowercase__ , 1 ) , 400 , 200 )
nn.process()
assert nn.output.any()
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
A = "digital_image_processing/image_data/lena.jpg"
# Reading the image and converting it to grayscale.
A = imread(lowercase__ , 0 )
# Test for get_neighbors_pixel function() return not None
A = 0
A = 0
A = image[x_coordinate][y_coordinate]
A = lbp.get_neighbors_pixel(
lowercase__ , lowercase__ , lowercase__ , lowercase__ )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
A = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
A = lbp.local_binary_value(lowercase__ , lowercase__ , lowercase__ )
assert lbp_image.any()
| 57
| 1
|
'''simple docstring'''
def a ( ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ :Optional[int] = []
UpperCamelCase__ :int = 1
while len(__a ) < 1e6:
constant.append(str(__a ) )
i += 1
UpperCamelCase__ :Union[str, Any] = ''''''.join(__a )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[99999] )
* int(constant[999999] )
)
if __name__ == "__main__":
print(solution())
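# Small-scale check of the digit indexing used above: the Champernowne constant
# begins "123456789101112...", so d1 == "1" and d12 == "1" (first digit of 11).
_digits = "".join(str(n) for n in range(1, 100))
assert _digits[0] == "1"   # d1
assert _digits[11] == "1"  # d12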
| 97
|
"""simple docstring"""
import argparse
import json
import subprocess
def lowerCamelCase__ ( _lowerCamelCase : Tuple , _lowerCamelCase : str ) -> List[Any]:
lowerCamelCase_ = []
lowerCamelCase_ = (
F'''curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'''
' https://api.github.com/repos/huggingface/transformers/actions/runners'
)
lowerCamelCase_ = subprocess.run(_lowerCamelCase , shell=_lowerCamelCase , stdout=subprocess.PIPE )
lowerCamelCase_ = output.stdout.decode('utf-8' )
lowerCamelCase_ = json.loads(_lowerCamelCase )
lowerCamelCase_ = status['runners']
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(_lowerCamelCase )
    # save the result so we can report it on Slack
with open('offline_runners.txt' , 'w' ) as fp:
fp.write(json.dumps(_lowerCamelCase ) )
if len(_lowerCamelCase ) > 0:
lowerCamelCase_ = '\n'.join([x['name'] for x in offline_runners] )
raise ValueError(F'''The following runners are offline:\n{failed}''' )
if __name__ == "__main__":
def lowerCamelCase__ ( _lowerCamelCase : Dict ) -> Tuple:
return values.split(',' )
_SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--target_runners''',
default=None,
type=list_str,
required=True,
help='''Comma-separated list of runners to check status.''',
)
parser.add_argument(
'''--token''', default=None, type=str, required=True, help='''A token that has actions:read permission.'''
)
_SCREAMING_SNAKE_CASE : Any = parser.parse_args()
get_runner_status(args.target_runners, args.token)
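# Example invocation (script filename, runner names and token are placeholders;
# the token needs actions:read on huggingface/transformers):
#
#   python get_github_runner_status.py \
#       --target_runners runner-1,runner-2 \
#       --token ghp_xxxxxxxxxxxxxxxx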
| 183
| 0
|
'''simple docstring'''
import torch
from transformers import AutoModel
class a ( torch.nn.Module ):
"""simple docstring"""
def __init__( self : Union[str, Any] , snake_case : Tuple="sayef/fsner-bert-base-uncased" ) -> List[str]:
super(snake_case , self ).__init__()
__UpperCAmelCase : List[Any] = AutoModel.from_pretrained(snake_case , return_dict=snake_case )
__UpperCAmelCase : int = torch.nn.CosineSimilarity(3 , 1E-08 )
__UpperCAmelCase : Tuple = torch.nn.Softmax(dim=1 )
def lowerCamelCase__ ( self : Optional[Any] , **snake_case : Tuple ) -> Dict:
return self.bert(**snake_case ).last_hidden_state
def lowerCamelCase__ ( self : Optional[Any] , snake_case : int ) -> Dict:
return token_embeddings.sum(2 , keepdim=snake_case )
def lowerCamelCase__ ( self : str , snake_case : int , snake_case : Union[str, Any] , snake_case : int=1 ) -> Any:
return self.softmax(T * self.cos(snake_case , snake_case ) )
def lowerCamelCase__ ( self : Union[str, Any] , snake_case : int , snake_case : List[Any] ) -> Optional[Any]:
__UpperCAmelCase : Union[str, Any] = W_supports['''sizes'''].tolist()
__UpperCAmelCase : Tuple = W_supports['''start_token_id'''].item()
__UpperCAmelCase : Tuple = W_supports['''end_token_id'''].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
__UpperCAmelCase : Optional[int] = self.BERT(**snake_case )
__UpperCAmelCase : Optional[Any] = self.BERT(**snake_case )
__UpperCAmelCase : List[str] = None
__UpperCAmelCase : int = None
__UpperCAmelCase : Optional[int] = W_supports['''input_ids'''] == start_token_id
__UpperCAmelCase : Any = W_supports['''input_ids'''] == end_token_id
for i, size in enumerate(snake_case ):
if i == 0:
__UpperCAmelCase : Dict = 0
else:
__UpperCAmelCase : Optional[int] = support_sizes[i - 1]
__UpperCAmelCase : List[str] = S[s : s + size][start_token_masks[s : s + size]]
__UpperCAmelCase : List[Any] = S[s : s + size][end_token_masks[s : s + size]]
__UpperCAmelCase : str = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
__UpperCAmelCase : Optional[Any] = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
__UpperCAmelCase : Optional[Any] = torch.vstack((p_starts, p_start) )
__UpperCAmelCase : Any = torch.vstack((p_ends, p_end) )
else:
__UpperCAmelCase : List[Any] = p_start
__UpperCAmelCase : Tuple = p_end
return p_starts, p_ends
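# A standalone sketch of the scoring used above: broadcasted token-level cosine
# similarity between query and support embeddings, sharpened by a temperature T
# and softmax-normalized. All shapes below are illustrative, not FSNER's real ones.
_cos = torch.nn.CosineSimilarity(3, 1E-08)
_softmax = torch.nn.Softmax(dim=1)
_q = torch.randn(2, 5, 1, 768)  # (batch, query_tokens, 1, hidden)
_s = torch.randn(2, 1, 7, 768)  # (batch, 1, support_tokens, hidden)
print(_softmax(1 * _cos(_q, _s)).shape)  # torch.Size([2, 5, 7])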
| 361
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class a ( BaseOutput ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : torch.FloatTensor
class a ( ModelMixin , ConfigMixin ):
"""simple docstring"""
@register_to_config
def __init__( self : str , snake_case : int = 32 , snake_case : int = 64 , snake_case : int = 20 , snake_case : int = 768 , snake_case : Tuple=77 , snake_case : List[Any]=4 , snake_case : float = 0.0 , snake_case : str = "silu" , snake_case : Optional[str] = None , snake_case : Optional[str] = None , snake_case : Optional[str] = "linear" , snake_case : Optional[str] = "prd" , snake_case : Optional[int] = None , snake_case : Optional[int] = None , snake_case : Optional[int] = None , ) -> List[str]:
super().__init__()
__UpperCAmelCase : List[Any] = num_attention_heads
__UpperCAmelCase : List[str] = attention_head_dim
__UpperCAmelCase : int = num_attention_heads * attention_head_dim
__UpperCAmelCase : List[Any] = additional_embeddings
__UpperCAmelCase : Any = time_embed_dim or inner_dim
__UpperCAmelCase : Any = embedding_proj_dim or embedding_dim
__UpperCAmelCase : Union[str, Any] = clip_embed_dim or embedding_dim
__UpperCAmelCase : List[Any] = Timesteps(snake_case , snake_case , 0 )
__UpperCAmelCase : Optional[Any] = TimestepEmbedding(snake_case , snake_case , out_dim=snake_case , act_fn=snake_case )
__UpperCAmelCase : str = nn.Linear(snake_case , snake_case )
if embedding_proj_norm_type is None:
__UpperCAmelCase : str = None
elif embedding_proj_norm_type == "layer":
__UpperCAmelCase : str = nn.LayerNorm(snake_case )
else:
raise ValueError(f'unsupported embedding_proj_norm_type: {embedding_proj_norm_type}' )
__UpperCAmelCase : List[Any] = nn.Linear(snake_case , snake_case )
if encoder_hid_proj_type is None:
__UpperCAmelCase : Union[str, Any] = None
elif encoder_hid_proj_type == "linear":
__UpperCAmelCase : Any = nn.Linear(snake_case , snake_case )
else:
raise ValueError(f'unsupported encoder_hid_proj_type: {encoder_hid_proj_type}' )
__UpperCAmelCase : Dict = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , snake_case ) )
if added_emb_type == "prd":
__UpperCAmelCase : Any = nn.Parameter(torch.zeros(1 , 1 , snake_case ) )
elif added_emb_type is None:
__UpperCAmelCase : List[Any] = None
else:
raise ValueError(
f'`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.' )
__UpperCAmelCase : Optional[int] = nn.ModuleList(
[
BasicTransformerBlock(
snake_case , snake_case , snake_case , dropout=snake_case , activation_fn='''gelu''' , attention_bias=snake_case , )
for d in range(snake_case )
] )
if norm_in_type == "layer":
__UpperCAmelCase : Tuple = nn.LayerNorm(snake_case )
elif norm_in_type is None:
__UpperCAmelCase : List[Any] = None
else:
raise ValueError(f'Unsupported norm_in_type: {norm_in_type}.' )
__UpperCAmelCase : Dict = nn.LayerNorm(snake_case )
__UpperCAmelCase : Any = nn.Linear(snake_case , snake_case )
__UpperCAmelCase : Any = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10_000.0 )
causal_attention_mask.triu_(1 )
__UpperCAmelCase : str = causal_attention_mask[None, ...]
self.register_buffer('''causal_attention_mask''' , snake_case , persistent=snake_case )
__UpperCAmelCase : Tuple = nn.Parameter(torch.zeros(1 , snake_case ) )
__UpperCAmelCase : List[str] = nn.Parameter(torch.zeros(1 , snake_case ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def lowerCamelCase__ ( self : int ) -> Dict[str, AttentionProcessor]:
__UpperCAmelCase : Optional[Any] = {}
def fn_recursive_add_processors(snake_case : str , snake_case : torch.nn.Module , snake_case : Dict[str, AttentionProcessor] ):
if hasattr(snake_case , '''set_processor''' ):
__UpperCAmelCase : Union[str, Any] = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f'{name}.{sub_name}' , snake_case , snake_case )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(snake_case , snake_case , snake_case )
return processors
def lowerCamelCase__ ( self : Optional[Any] , snake_case : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ) -> Union[str, Any]:
__UpperCAmelCase : Union[str, Any] = len(self.attn_processors.keys() )
if isinstance(snake_case , snake_case ) and len(snake_case ) != count:
raise ValueError(
f'A dict of processors was passed, but the number of processors {len(snake_case )} does not match the'
f' number of attention layers: {count}. Please make sure to pass {count} processor classes.' )
def fn_recursive_attn_processor(snake_case : str , snake_case : torch.nn.Module , snake_case : int ):
if hasattr(snake_case , '''set_processor''' ):
if not isinstance(snake_case , snake_case ):
module.set_processor(snake_case )
else:
module.set_processor(processor.pop(f'{name}.processor' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f'{name}.{sub_name}' , snake_case , snake_case )
for name, module in self.named_children():
fn_recursive_attn_processor(snake_case , snake_case , snake_case )
def lowerCamelCase__ ( self : str ) -> Tuple:
self.set_attn_processor(AttnProcessor() )
def lowerCamelCase__ ( self : Optional[Any] , snake_case : List[Any] , snake_case : Union[torch.Tensor, float, int] , snake_case : torch.FloatTensor , snake_case : Optional[torch.FloatTensor] = None , snake_case : Optional[torch.BoolTensor] = None , snake_case : bool = True , ) -> List[Any]:
__UpperCAmelCase : Any = hidden_states.shape[0]
__UpperCAmelCase : Optional[int] = timestep
if not torch.is_tensor(snake_case ):
__UpperCAmelCase : str = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(snake_case ) and len(timesteps.shape ) == 0:
__UpperCAmelCase : Any = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__UpperCAmelCase : Optional[int] = timesteps * torch.ones(snake_case , dtype=timesteps.dtype , device=timesteps.device )
__UpperCAmelCase : Tuple = self.time_proj(snake_case )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
__UpperCAmelCase : int = timesteps_projected.to(dtype=self.dtype )
__UpperCAmelCase : Optional[int] = self.time_embedding(snake_case )
if self.embedding_proj_norm is not None:
__UpperCAmelCase : Optional[Any] = self.embedding_proj_norm(snake_case )
__UpperCAmelCase : str = self.embedding_proj(snake_case )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
__UpperCAmelCase : Dict = self.encoder_hidden_states_proj(snake_case )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('''`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set''' )
__UpperCAmelCase : Optional[int] = self.proj_in(snake_case )
__UpperCAmelCase : Optional[Any] = self.positional_embedding.to(hidden_states.dtype )
__UpperCAmelCase : Union[str, Any] = []
__UpperCAmelCase : Optional[Any] = 0
if encoder_hidden_states is not None:
additional_embeds.append(snake_case )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
__UpperCAmelCase : Optional[int] = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
__UpperCAmelCase : Union[str, Any] = hidden_states[:, None, :]
__UpperCAmelCase : Union[str, Any] = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
__UpperCAmelCase : Any = self.prd_embedding.to(hidden_states.dtype ).expand(snake_case , -1 , -1 )
additional_embeds.append(snake_case )
__UpperCAmelCase : Dict = torch.cat(
snake_case , dim=1 , )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
__UpperCAmelCase : str = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
__UpperCAmelCase : Union[str, Any] = F.pad(
snake_case , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
__UpperCAmelCase : Optional[int] = hidden_states + positional_embeddings
if attention_mask is not None:
__UpperCAmelCase : List[Any] = (1 - attention_mask.to(hidden_states.dtype )) * -10_000.0
__UpperCAmelCase : str = F.pad(snake_case , (0, self.additional_embeddings) , value=0.0 )
__UpperCAmelCase : Optional[int] = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
__UpperCAmelCase : Optional[int] = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
__UpperCAmelCase : str = self.norm_in(snake_case )
for block in self.transformer_blocks:
__UpperCAmelCase : Optional[int] = block(snake_case , attention_mask=snake_case )
__UpperCAmelCase : int = self.norm_out(snake_case )
if self.prd_embedding is not None:
__UpperCAmelCase : Optional[int] = hidden_states[:, -1]
else:
__UpperCAmelCase : List[Any] = hidden_states[:, additional_embeddings_len:]
__UpperCAmelCase : Dict = self.proj_to_clip_embeddings(snake_case )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=snake_case )
def lowerCamelCase__ ( self : Optional[int] , snake_case : List[str] ) -> str:
__UpperCAmelCase : Dict = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
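# How the causal attention mask registered above behaves, in isolation: entries
# strictly above the diagonal stay at a large negative value, so adding the mask
# to attention logits zeroes out future positions after the softmax.
_mask = torch.full([4, 4], -10_000.0)
_mask.triu_(1)  # in-place: keep -10000 strictly above the diagonal, 0 elsewhere
# tensor([[     0., -10000., -10000., -10000.],
#         [     0.,      0., -10000., -10000.],
#         [     0.,      0.,      0., -10000.],
#         [     0.,      0.,      0.,      0.]])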
| 240
| 0
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__lowerCamelCase : Tuple = 16
__lowerCamelCase : Optional[int] = 32
def A_ ( _lowerCAmelCase , _lowerCAmelCase = 16 ) -> List[str]:
UpperCamelCase : Tuple = AutoTokenizer.from_pretrained("bert-base-cased" )
UpperCamelCase : Tuple = load_dataset("glue" , "mrpc" )
def tokenize_function(_lowerCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
UpperCamelCase : Any = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
UpperCamelCase : Union[str, Any] = datasets.map(
_lowerCAmelCase , batched=_lowerCAmelCase , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCamelCase : Union[str, Any] = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(_lowerCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCamelCase : int = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCamelCase : Any = 16
elif accelerator.mixed_precision != "no":
UpperCamelCase : Union[str, Any] = 8
else:
UpperCamelCase : List[str] = None
return tokenizer.pad(
_lowerCAmelCase , padding="longest" , max_length=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , return_tensors="pt" , )
# Instantiate dataloaders.
UpperCamelCase : List[Any] = DataLoader(
tokenized_datasets["train"] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase )
UpperCamelCase : int = DataLoader(
tokenized_datasets["validation"] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase )
return train_dataloader, eval_dataloader
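# Why pad_to_multiple_of is set above: tensor cores are fastest when sequence
# lengths are multiples of 8 (fp16/bf16) or 16 (fp8). A sketch of the rounding:
def _round_up_to_multiple(length, multiple):
    return ((length + multiple - 1) // multiple) * multiple


assert _round_up_to_multiple(27, 8) == 32   # a 27-token batch pads to 32 for fp16
assert _round_up_to_multiple(27, 16) == 32  # and also to 32 for fp8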
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__lowerCamelCase : Any = mocked_dataloaders # noqa: F811
def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]:
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS" , _lowerCAmelCase ) == "1":
UpperCamelCase : str = 2
# New Code #
UpperCamelCase : Any = int(args.gradient_accumulation_steps )
UpperCamelCase : int = int(args.local_sgd_steps )
# Initialize accelerator
UpperCamelCase : Union[str, Any] = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=_lowerCAmelCase )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCamelCase : Union[str, Any] = config["lr"]
UpperCamelCase : str = int(config["num_epochs"] )
UpperCamelCase : List[str] = int(config["seed"] )
UpperCamelCase : Any = int(config["batch_size"] )
UpperCamelCase : Optional[int] = evaluate.load("glue" , "mrpc" )
set_seed(_lowerCAmelCase )
UpperCamelCase , UpperCamelCase : Any = get_dataloaders(_lowerCAmelCase , _lowerCAmelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCamelCase : Tuple = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=_lowerCAmelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCamelCase : List[str] = model.to(accelerator.device )
# Instantiate optimizer
UpperCamelCase : Union[str, Any] = AdamW(params=model.parameters() , lr=_lowerCAmelCase )
# Instantiate scheduler
UpperCamelCase : Optional[Any] = get_linear_schedule_with_warmup(
optimizer=_lowerCAmelCase , num_warmup_steps=100 , num_training_steps=(len(_lowerCAmelCase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : List[str] = accelerator.prepare(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Now we train the model
for epoch in range(_lowerCAmelCase ):
model.train()
with LocalSGD(
accelerator=_lowerCAmelCase , model=_lowerCAmelCase , local_sgd_steps=_lowerCAmelCase , enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(_lowerCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs, nor do we advise using them, as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(_lowerCAmelCase ):
UpperCamelCase : Union[str, Any] = model(**_lowerCAmelCase )
UpperCamelCase : List[Any] = output.loss
accelerator.backward(_lowerCAmelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(_lowerCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCamelCase : Union[str, Any] = model(**_lowerCAmelCase )
UpperCamelCase : Optional[Any] = outputs.logits.argmax(dim=-1 )
UpperCamelCase , UpperCamelCase : str = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=_lowerCAmelCase , references=_lowerCAmelCase , )
UpperCamelCase : int = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" , _lowerCAmelCase )
def A_ ( ) -> int:
UpperCamelCase : Optional[Any] = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=_lowerCAmelCase , default=_lowerCAmelCase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
# New Code #
parser.add_argument(
"--gradient_accumulation_steps" , type=_lowerCAmelCase , default=1 , help="The number of minibatches to be ran before gradients are accumulated." , )
parser.add_argument(
"--local_sgd_steps" , type=_lowerCAmelCase , default=8 , help="Number of local SGD steps or None to disable local SGD" )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
UpperCamelCase : Union[str, Any] = parser.parse_args()
UpperCamelCase : Union[str, Any] = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(_lowerCAmelCase , _lowerCAmelCase )
if __name__ == "__main__":
main()
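# Example launches (the script filename is a placeholder; flag names match the
# parser above, and lr/epochs/seed/batch size come from the hard-coded config):
#
#   accelerate launch local_sgd_example.py --local_sgd_steps 8
#   accelerate launch local_sgd_example.py --mixed_precision fp16 --gradient_accumulation_steps 2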
| 52
|
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case__ :
"""simple docstring"""
def __init__( self : Tuple, _snake_case : Any, _snake_case : int=1_3, _snake_case : Optional[int]=3_2, _snake_case : Tuple=2, _snake_case : Any=3, _snake_case : Tuple=1_6, _snake_case : Tuple=[1, 2, 1], _snake_case : Dict=[2, 2, 4], _snake_case : str=2, _snake_case : Union[str, Any]=2.0, _snake_case : Dict=True, _snake_case : Dict=0.0, _snake_case : str=0.0, _snake_case : str=0.1, _snake_case : List[str]="gelu", _snake_case : int=False, _snake_case : Optional[Any]=True, _snake_case : List[Any]=0.0_2, _snake_case : Union[str, Any]=1e-5, _snake_case : Union[str, Any]=True, _snake_case : List[Any]=None, _snake_case : Any=True, _snake_case : List[Any]=1_0, _snake_case : str=8, ) ->Union[str, Any]:
snake_case__ : Any = parent
snake_case__ : Tuple = batch_size
snake_case__ : Tuple = image_size
snake_case__ : Any = patch_size
snake_case__ : Optional[int] = num_channels
snake_case__ : Tuple = embed_dim
snake_case__ : Any = depths
snake_case__ : Any = num_heads
snake_case__ : List[str] = window_size
snake_case__ : Dict = mlp_ratio
snake_case__ : Optional[int] = qkv_bias
snake_case__ : Optional[Any] = hidden_dropout_prob
snake_case__ : List[str] = attention_probs_dropout_prob
snake_case__ : Union[str, Any] = drop_path_rate
snake_case__ : str = hidden_act
snake_case__ : Union[str, Any] = use_absolute_embeddings
snake_case__ : Union[str, Any] = patch_norm
snake_case__ : Any = layer_norm_eps
snake_case__ : Tuple = initializer_range
snake_case__ : Dict = is_training
snake_case__ : Any = scope
snake_case__ : Optional[Any] = use_labels
snake_case__ : str = type_sequence_label_size
snake_case__ : List[Any] = encoder_stride
def lowercase_ ( self : Tuple ) ->str:
snake_case__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ : List[Any] = None
if self.use_labels:
snake_case__ : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size )
snake_case__ : Any = self.get_config()
return config, pixel_values, labels
def lowercase_ ( self : Optional[int] ) ->Optional[int]:
return SwinvaConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, path_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, )
def lowercase_ ( self : Optional[int], _snake_case : str, _snake_case : List[str], _snake_case : int ) ->Dict:
snake_case__ : List[Any] = SwinvaModel(config=_snake_case )
model.to(_snake_case )
model.eval()
snake_case__ : Optional[int] = model(_snake_case )
snake_case__ : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
snake_case__ : List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim) )
def lowercase_ ( self : Optional[Any], _snake_case : Any, _snake_case : List[str], _snake_case : Dict ) ->List[Any]:
snake_case__ : List[str] = SwinvaForMaskedImageModeling(config=_snake_case )
model.to(_snake_case )
model.eval()
snake_case__ : Union[str, Any] = model(_snake_case )
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
snake_case__ : Optional[Any] = 1
snake_case__ : Optional[int] = SwinvaForMaskedImageModeling(_snake_case )
model.to(_snake_case )
model.eval()
snake_case__ : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case__ : Any = model(_snake_case )
self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size) )
def lowercase_ ( self : List[str], _snake_case : int, _snake_case : List[Any], _snake_case : Optional[int] ) ->Any:
snake_case__ : Tuple = self.type_sequence_label_size
snake_case__ : int = SwinvaForImageClassification(_snake_case )
model.to(_snake_case )
model.eval()
snake_case__ : Tuple = model(_snake_case, labels=_snake_case )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def lowercase_ ( self : Any ) ->Dict:
snake_case__ : str = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ : List[str] = config_and_inputs
snake_case__ : Union[str, Any] = {'pixel_values': pixel_values}
return config, inputs_dict
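# The shape checks above, worked out for this tester's defaults (image_size=32,
# patch_size=2, embed_dim=16, depths of length 3): every stage after the first
# merges 2x2 patches, dividing the token count by 4 and doubling the hidden size.
_num_patches = (32 // 2) ** 2                       # 256 tokens after patch embedding
_expected_seq_len = _num_patches // (4 ** (3 - 1))  # 256 // 16 = 16
_expected_dim = 16 * 2 ** (3 - 1)                   # 16 * 4 = 64
assert (_expected_seq_len, _expected_dim) == (16, 64)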
@require_torch
class snake_case__ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
_SCREAMING_SNAKE_CASE = (
{"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification}
if is_torch_available()
else {}
)
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
def lowercase_ ( self : Union[str, Any] ) ->Dict:
snake_case__ : Optional[int] = SwinvaModelTester(self )
snake_case__ : int = ConfigTester(self, config_class=_snake_case, embed_dim=3_7 )
def lowercase_ ( self : Tuple ) ->int:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase_ ( self : Any ) ->str:
snake_case__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
@unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.' )
def lowercase_ ( self : Any ) ->Union[str, Any]:
pass
@unittest.skip(reason='Swinv2 does not use inputs_embeds' )
def lowercase_ ( self : str ) ->Union[str, Any]:
pass
def lowercase_ ( self : Optional[Any] ) ->Union[str, Any]:
snake_case__ , snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Union[str, Any] = model_class(_snake_case )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
snake_case__ : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_snake_case, nn.Linear ) )
def lowercase_ ( self : List[str] ) ->Optional[int]:
snake_case__ , snake_case__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Any = model_class(_snake_case )
snake_case__ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : Optional[Any] = [*signature.parameters.keys()]
snake_case__ : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1], _snake_case )
def lowercase_ ( self : str ) ->Union[str, Any]:
snake_case__ , snake_case__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : int = True
for model_class in self.all_model_classes:
snake_case__ : str = True
snake_case__ : Union[str, Any] = False
snake_case__ : Tuple = True
snake_case__ : int = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
snake_case__ : Optional[int] = model(**self._prepare_for_class(_snake_case, _snake_case ) )
snake_case__ : List[str] = outputs.attentions
snake_case__ : List[Any] = len(self.model_tester.depths )
self.assertEqual(len(_snake_case ), _snake_case )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
snake_case__ : str = True
snake_case__ : Tuple = config.window_size**2
snake_case__ : Optional[int] = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
snake_case__ : str = model(**self._prepare_for_class(_snake_case, _snake_case ) )
snake_case__ : Tuple = outputs.attentions
self.assertEqual(len(_snake_case ), _snake_case )
self.assertListEqual(
list(attentions[0].shape[-3:] ), [self.model_tester.num_heads[0], window_size_squared, window_size_squared], )
snake_case__ : Optional[Any] = len(_snake_case )
# Check attention is always last and order is fine
snake_case__ : Optional[int] = True
snake_case__ : Dict = True
snake_case__ : List[Any] = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
snake_case__ : Optional[int] = model(**self._prepare_for_class(_snake_case, _snake_case ) )
if hasattr(self.model_tester, 'num_hidden_states_types' ):
snake_case__ : str = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
snake_case__ : Dict = 2
self.assertEqual(out_len + added_hidden_states, len(_snake_case ) )
snake_case__ : Any = outputs.attentions
self.assertEqual(len(_snake_case ), _snake_case )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ), [self.model_tester.num_heads[0], window_size_squared, window_size_squared], )
def lowercase_ ( self : Dict, _snake_case : Tuple, _snake_case : Any, _snake_case : int, _snake_case : Optional[int] ) ->str:
snake_case__ : Dict = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
snake_case__ : List[Any] = model(**self._prepare_for_class(_snake_case, _snake_case ) )
snake_case__ : Dict = outputs.hidden_states
snake_case__ : int = getattr(
self.model_tester, 'expected_num_hidden_layers', len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_snake_case ), _snake_case )
# Swinv2 has a different seq_length
snake_case__ : int = (
config.patch_size
if isinstance(config.patch_size, collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
snake_case__ : Optional[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [num_patches, self.model_tester.embed_dim], )
snake_case__ : Union[str, Any] = outputs.reshaped_hidden_states
self.assertEqual(len(_snake_case ), _snake_case )
snake_case__ , snake_case__ , snake_case__ , snake_case__ : str = reshaped_hidden_states[0].shape
snake_case__ : Any = (
reshaped_hidden_states[0].view(_snake_case, _snake_case, height * width ).permute(0, 2, 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ), [num_patches, self.model_tester.embed_dim], )
def lowercase_ ( self : str ) ->List[Any]:
snake_case__ , snake_case__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : List[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size, collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
snake_case__ : Optional[int] = True
self.check_hidden_states_output(_snake_case, _snake_case, _snake_case, _snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case__ : Dict = True
self.check_hidden_states_output(_snake_case, _snake_case, _snake_case, _snake_case )
def lowercase_ ( self : List[str] ) ->str:
snake_case__ , snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : List[str] = 3
snake_case__ : Union[str, Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size, collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
snake_case__ : str = (
config.patch_size
if isinstance(config.patch_size, collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
snake_case__ : Tuple = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
snake_case__ : Optional[Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
snake_case__ : int = True
self.check_hidden_states_output(_snake_case, _snake_case, _snake_case, (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case__ : List[str] = True
self.check_hidden_states_output(_snake_case, _snake_case, _snake_case, (padded_height, padded_width) )
def lowercase_ ( self : List[str] ) ->Optional[int]:
snake_case__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_snake_case )
def lowercase_ ( self : List[Any] ) ->str:
snake_case__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
@slow
def lowercase_ ( self : str ) ->Union[str, Any]:
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : Dict = SwinvaModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def lowercase_ ( self : Optional[int] ) ->List[str]:
snake_case__ , snake_case__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : List[Any] = _config_zero_init(_snake_case )
for model_class in self.all_model_classes:
snake_case__ : List[str] = model_class(config=_snake_case )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', )
@require_vision
@require_torch
class snake_case__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase_ ( self : Union[str, Any] ) ->List[str]:
return (
AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' )
if is_vision_available()
else None
)
@slow
def lowercase_ ( self : int ) ->List[Any]:
snake_case__ : Any = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ).to(
_snake_case )
snake_case__ : int = self.default_image_processor
snake_case__ : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
snake_case__ : Optional[Any] = image_processor(images=_snake_case, return_tensors='pt' ).to(_snake_case )
# forward pass
with torch.no_grad():
snake_case__ : List[str] = model(**_snake_case )
# verify the logits
snake_case__ : int = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape, _snake_case )
snake_case__ : Optional[int] = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(_snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3], _snake_case, atol=1e-4 ) )
| 277
| 0
|
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case_( TokenizerTesterMixin , unittest.TestCase ):
__UpperCamelCase = RobertaTokenizer
__UpperCamelCase = RobertaTokenizerFast
__UpperCamelCase = True
__UpperCamelCase = {'cls_token': '<s>'}
def lowerCamelCase__ ( self : str ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCAmelCase : str = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
lowerCAmelCase : int = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
lowerCAmelCase : Dict = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
lowerCAmelCase : Dict = {'''unk_token''': '''<unk>'''}
lowerCAmelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__lowerCAmelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__lowerCAmelCase ) )
def lowerCamelCase__ ( self : Dict , **UpperCamelCase_ : Tuple ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def lowerCamelCase__ ( self : str , **UpperCamelCase_ : Optional[Any] ):
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Dict ):
lowerCAmelCase : Optional[int] = '''lower newer'''
lowerCAmelCase : Optional[int] = '''lower newer'''
return input_text, output_text
def lowerCamelCase__ ( self : int ):
lowerCAmelCase : Dict = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowerCAmelCase : int = '''lower newer'''
lowerCAmelCase : Any = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
lowerCAmelCase : int = tokenizer.tokenize(__lowerCAmelCase ) # , add_prefix_space=True)
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
lowerCAmelCase : int = tokens + [tokenizer.unk_token]
lowerCAmelCase : Any = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , __lowerCAmelCase )
def lowerCamelCase__ ( self : Any ):
lowerCAmelCase : Union[str, Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=__lowerCAmelCase ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=__lowerCAmelCase ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2] , )
@slow
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : List[str] = self.tokenizer_class.from_pretrained('''roberta-base''' )
lowerCAmelCase : Optional[Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=__lowerCAmelCase )
lowerCAmelCase : str = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__lowerCAmelCase )
lowerCAmelCase : Any = tokenizer.encode(
'''sequence builders''' , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
lowerCAmelCase : Any = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
lowerCAmelCase : Dict = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase )
lowerCAmelCase : Optional[int] = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase , __lowerCAmelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def lowerCamelCase__ ( self : int ):
lowerCAmelCase : Any = self.get_tokenizer()
lowerCAmelCase : Optional[Any] = '''Encode this sequence.'''
lowerCAmelCase : Optional[int] = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
lowerCAmelCase : str = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
lowerCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase )
lowerCAmelCase : List[str] = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
lowerCAmelCase : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
lowerCAmelCase : Dict = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
lowerCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase )
# Testing spaces after special tokens
lowerCAmelCase : Dict = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase )} ) # mask token has a left space
lowerCAmelCase : str = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
lowerCAmelCase : str = '''Encode <mask> sequence'''
lowerCAmelCase : Optional[Any] = '''Encode <mask>sequence'''
lowerCAmelCase : Dict = tokenizer.encode(__lowerCAmelCase )
lowerCAmelCase : List[str] = encoded.index(__lowerCAmelCase )
lowerCAmelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
lowerCAmelCase : Union[str, Any] = tokenizer.encode(__lowerCAmelCase )
lowerCAmelCase : Optional[Any] = encoded.index(__lowerCAmelCase )
lowerCAmelCase : int = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase )
def lowerCamelCase__ ( self : Any ):
pass
def lowerCamelCase__ ( self : List[str] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCAmelCase : str = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
lowerCAmelCase : str = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
lowerCAmelCase : List[Any] = '''A, <mask> AllenNLP sentence.'''
lowerCAmelCase : Optional[Any] = tokenizer_r.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase )
lowerCAmelCase : Dict = tokenizer_p.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
lowerCAmelCase : Tuple = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
lowerCAmelCase : Any = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
__lowerCAmelCase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
__lowerCAmelCase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def lowerCamelCase__ ( self : List[str] ):
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
lowerCAmelCase : Any = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
lowerCAmelCase : Union[str, Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
lowerCAmelCase : Union[str, Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , __lowerCAmelCase )
self.assertEqual(post_processor_state['''add_prefix_space'''] , __lowerCAmelCase )
self.assertEqual(post_processor_state['''trim_offsets'''] , __lowerCAmelCase )
def lowerCamelCase__ ( self : str ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCAmelCase : Any = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
lowerCAmelCase : Union[str, Any] = F'''{text_of_1_token} {text_of_1_token}'''
lowerCAmelCase : List[str] = self.rust_tokenizer_class.from_pretrained(
__lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
lowerCAmelCase : int = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCAmelCase ) + 1, len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
lowerCAmelCase : Any = self.rust_tokenizer_class.from_pretrained(
__lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
lowerCAmelCase : List[Any] = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCAmelCase ) + 1, len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
lowerCAmelCase : Dict = self.rust_tokenizer_class.from_pretrained(
__lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
lowerCAmelCase : List[Any] = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCAmelCase ), len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
lowerCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
__lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
lowerCAmelCase : List[Any] = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCAmelCase ), len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
lowerCAmelCase : Union[str, Any] = F''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
lowerCAmelCase : str = self.rust_tokenizer_class.from_pretrained(
__lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
lowerCAmelCase : List[Any] = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCAmelCase ) + 1, 1 + len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
lowerCAmelCase : Tuple = self.rust_tokenizer_class.from_pretrained(
__lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
lowerCAmelCase : Optional[int] = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCAmelCase ), 1 + len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
lowerCAmelCase : Tuple = self.rust_tokenizer_class.from_pretrained(
__lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
lowerCAmelCase : Any = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCAmelCase ), 1 + len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
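# Note (added): the four live configurations in the last test cover the product
# of add_prefix_space and trim_offsets on the space-prefixed text; the block
# that stays commented out in the original corresponds to the remaining
# (add_prefix_space=True, trim_offsets=True) case.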
"""simple docstring"""
def solution(limit: int = 50000000) -> int:
    """
    Count the integers below ``limit`` that can be expressed as the sum of a
    prime square, a prime cube and a prime fourth power (Project Euler 87).
    """
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    for prime1 in primes:
        square = prime1 * prime1
        for prime2 in primes:
            cube = prime2 * prime2 * prime2
            if square + cube >= limit - 16:
                break
            for prime3 in primes:
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)
if __name__ == "__main__":
print(f"""{solution() = }""")
'''simple docstring'''
from __future__ import annotations

from fractions import Fraction
from math import gcd, sqrt


def is_sq(number: int) -> bool:
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator
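# Note (added for clarity): for each pair of reduced fractions x and y, the
# loops above derive a candidate z from x^n + y^n = z^n with n in {1, 2, -1, -2};
# the square-root branches handle n = +/-2, where z is kept only when both the
# numerator and denominator of z^2 are perfect squares.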
if __name__ == "__main__":
print(f"""{solution() = }""")
'''simple docstring'''
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging


logger = logging.get_logger(__name__)

UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}


class UMT5Config(PretrainedConfig):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers


class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
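# Usage sketch (hedged: relies only on the defaults defined above):
#   config = UMT5Config()
#   assert config.hidden_size == config.d_model == 512
#   assert config.num_decoder_layers == config.num_layers == 8  # symmetric by default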
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)

    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)

    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()

        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))

    @slow
    def test_tokenizer_integration(self):
# fmt: off
__a : Optional[Any] = {'''input_ids''': [[43495, 462, 20, 42164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 38999, 6, 8, 464, 132, 1703, 492, 13, 4669, 37867, 13, 7525, 27, 1593, 988, 13, 33972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 12338, 2, 13958, 387, 2, 3629, 6953, 188, 2900, 2, 13958, 8011, 11501, 23, 8460, 4073, 34009, 20, 435, 11439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 37867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 26453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10767, 6, 316, 304, 4239, 3, 0], [148, 15722, 19, 1839, 12, 1350, 13, 22327, 5082, 5418, 47567, 35938, 59, 318, 19552, 108, 2183, 54, 14976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 19088, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100], [36, 6395, 12570, 39147, 11597, 6, 266, 4, 45405, 7296, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        # `__a` above holds the expected encoding (kept under its original name
        # because the fmt: off block is preserved verbatim)
        self.tokenizer_integration_test_util(
            expected_encoding=__a,
            model_name="Helsinki-NLP/opus-mt-en-de",
            revision="1a8c2263da11e68e50938f97e10cd57820bd504c",
            decode_kwargs={"use_source_tokenizer": True},
        )
    def test_separate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(expected_src_ids, src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(expected_target_ids, target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
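# Note (added): `test_separate_vocabs` exercises a checkpoint with distinct
# source and target SentencePiece models, which is why encoding the same pair of
# sentences via `tokenizer(...)` and `tokenizer(text_target=...)` yields
# different id sequences.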
"""simple docstring"""
import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '''\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = "{COMET}: A Neural Framework for {MT} Evaluation",
author = "Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
pages = "2685--2702",
}
'''
_DESCRIPTION = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
'''
_KWARGS_DESCRIPTION = '''
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric(\'comet\')
>>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use
>>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
>>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
>>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results["scores"]])
[0.19, 0.92]
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class COMET(datasets.Metric):
'''simple docstring'''
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://unbabel.github.io/COMET/html/index.html''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''sources''': datasets.Value('''string''' , id='''sequence''' ),
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/Unbabel/COMET'''] , reference_urls=[
'''https://github.com/Unbabel/COMET''',
'''https://www.aclweb.org/anthology/2020.emnlp-main.213/''',
'''http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6''',
] , )
    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))
    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = "\\n@inproceedings{snover-etal-2006-study,\n    title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n    author = \"Snover, Matthew and\n      Dorr, Bonnie and\n      Schwartz, Rich and\n      Micciulla, Linnea and\n      Makhoul, John\",\n    booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n    month = aug # \" 8-12\",\n    year = \"2006\",\n    address = \"Cambridge, Massachusetts, USA\",\n    publisher = \"Association for Machine Translation in the Americas\",\n    url = \"https://aclanthology.org/2006.amta-papers.25\",\n    pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n    title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n    author = \"Post, Matt\",\n    booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n    month = oct,\n    year = \"2018\",\n    address = \"Belgium, Brussels\",\n    publisher = \"Association for Computational Linguistics\",\n    url = \"https://www.aclweb.org/anthology/W18-6319\",\n    pages = \"186--191\",\n}\n"
_DESCRIPTION = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n"
_KWARGS_DESCRIPTION = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n    predictions (list of str): The system stream (a sequence of segments).\n    references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n    normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n        as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n        Only applies if `normalized = True`. Defaults to `False`.\n    case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n    'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n    'num_edits' (int): The cumulative number of edits\n    'ref_length' (float): The cumulative average reference length\n\nExamples:\n    Example 1:\n        >>> predictions = [\"does this sentence match??\",\n        ...                    \"what about this sentence?\",\n        ...                    \"What did the TER metric user say to the developer?\"]\n        >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n        ...             [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n        ...             [\"Your jokes are...\", \"...TERrible\"]]\n        >>> ter = datasets.load_metric(\"ter\")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n    Example 2:\n        >>> predictions = [\"does this sentence match??\",\n        ...                    \"what about this sentence?\"]\n        >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n        ...             [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n        >>> ter = datasets.load_metric(\"ter\")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n    Example 3:\n        >>> predictions = [\"does this sentence match??\",\n        ...                    \"what about this sentence?\"]\n        >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n        ...             [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n        >>> ter = datasets.load_metric(\"ter\")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         normalized=True,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n    Example 4:\n        >>> predictions = [\"does this sentence match??\",\n        ...                    \"what about this sentence?\"]\n        >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n        ...             [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n        >>> ter = datasets.load_metric(\"ter\")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         ignore_punct=True,\n        ...                         case_sensitive=False)\n        >>> print(results)\n        {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n    Example 5:\n        >>> predictions = [\"does this sentence match??\",\n        ...                    \"what about this sentence?\",\n        ...                    \"What did the TER metric user say to the developer?\"]\n        >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n        ...             [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n        ...             [\"Your jokes are...\", \"...TERrible\"]]\n        >>> ter = datasets.load_metric(\"ter\")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         ignore_punct=True,\n        ...                         case_sensitive=False)\n        >>> print(results)\n        {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Ter(datasets.Metric):
'''simple docstring'''
    def _info(self):
if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[
"https://github.com/jhclark/tercom",
] , )
    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
"""simple docstring"""
def fizz_buzz(number: int, iterations: int) -> str:
    """Plays FizzBuzz, starting from ``number`` and counting up to ``iterations``."""
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)

        # print(out)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
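# Example (a sketch): playing from 1 to 15 yields the classic sequence, with a
# trailing space after every entry and "FizzBuzz" for 15 (divisible by 3 and 5).
if __name__ == "__main__":
    print(fizz_buzz(1, 15))
    # -> "1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz "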
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class EncodecFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "padding_mask"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 24000,
        padding_value: float = 0.0,
        chunk_length_s: float = None,
        overlap: float = None,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap

    # This is a property because you might want to change chunk_length_s on the fly
    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    # This is a property because you might want to change chunk_length_s on the fly
    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))
    def __call__(
        self,
        raw_audio: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Optional[Union[bool, str, PaddingStrategy]] = None,
        truncation: Optional[bool] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if padding and truncation:
            raise ValueError("Both padding and truncation were set. Make sure you only set one.")
        elif padding is None:
            # by default let's pad the inputs
            padding = True

        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]

        # verify inputs are valid
        for idx, example in enumerate(raw_audio):
            if example.ndim > 2:
                raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}")
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels")
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels")

        padded_inputs = None
        input_values = BatchFeature({"input_values": raw_audio})
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = "max_length"
            else:
                padded_inputs = input_values

        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values,
                max_length=max_length,
                truncation=truncation,
                padding=padding,
                return_attention_mask=padding,
            )
            if padding:
                padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")

        input_values = []
        for example in padded_inputs.pop("input_values"):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T)

        padded_inputs["input_values"] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
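# Usage sketch (hedged: the waveform below is a placeholder):
#   import numpy as np
#   fe = EncodecFeatureExtractor(feature_size=1, sampling_rate=24000, chunk_length_s=1.0, overlap=0.01)
#   audio = np.zeros(24000, dtype=np.float32)  # one second of silence
#   inputs = fe(audio, sampling_rate=24000, return_tensors="np")
#   # inputs["input_values"] is (batch, channels, time); a "padding_mask" is
#   # returned whenever padding was applied.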
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
    "configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"]
    _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_maskformer"] = [
        "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MaskFormerForInstanceSegmentation",
        "MaskFormerModel",
        "MaskFormerPreTrainedModel",
    ]
    _import_structure["modeling_maskformer_swin"] = [
        "MaskFormerSwinBackbone",
        "MaskFormerSwinModel",
        "MaskFormerSwinPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
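# Note (added): with the lazy structure above, e.g. `from
# transformers.models.maskformer import MaskFormerConfig` only materializes the
# `configuration_maskformer` submodule on first attribute access; `_LazyModule`
# resolves names through `_import_structure`.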
'''simple docstring'''
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)

name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names
def add_arguments(parser):
    """Add arguments to parser for functions defined in quant_trainer."""

    group = parser.add_argument_group("quant_trainer arguments")
    group.add_argument("--wprec", type=int, default=8, help="weight precision")
    group.add_argument("--aprec", type=int, default=8, help="activation precision")
    group.add_argument("--quant-per-tensor", action="store_true", help="per tensor weight scaling")
    group.add_argument("--quant-disable", action="store_true", help="disable all quantizers")
    group.add_argument("--quant-disable-embeddings", action="store_true", help="disable all embeddings quantizers")
    group.add_argument("--quant-disable-keyword", type=str, nargs="+", help="disable quantizers by keyword")
    group.add_argument("--quant-disable-layer-module", type=str, help="disable quantizers by keyword under layer.")
    group.add_argument("--quant-enable-layer-module", type=str, help="enable quantizers by keyword under layer")
    group.add_argument("--calibrator", default="max", help="which quantization range calibrator to use")
    group.add_argument("--percentile", default=None, type=float, help="percentile for PercentileCalibrator")
    group.add_argument("--fuse-qkv", action="store_true", help="use the same scale factor for qkv")
    group.add_argument("--clip-gelu", metavar="N", type=float, help="clip gelu output maximum value to N")
    group.add_argument(
        "--recalibrate-weights",
        action="store_true",
        help=(
            "recalibrate weight amaxes by taking the max of the weights."
            " amaxes will be computed with the current quantization granularity (axis)."
        ),
    )
def set_default_quantizers(args):
    """Set default quantizers before creating the model."""

    if args.calibrator == "max":
        calib_method = "max"
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("Specify --percentile when using percentile calibrator")
        calib_method = "histogram"
    elif args.calibrator == "mse":
        calib_method = "histogram"
    else:
        raise ValueError(f"Invalid calibrator {args.calibrator}")

    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method)
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)
def configure_model(model, args, calib=False, eval=False):
    """Function called before the training loop."""

    logger.info("Configuring Model for Quantization")
    logger.info(f"using quantization package {pytorch_quantization.__file__}")

    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model, ["embeddings"], which="weight", _disabled=True)
        if args.quant_disable:
            set_quantizer_by_name(model, [""], _disabled=True)
        if args.quant_disable_keyword:
            set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_disable_layer_module], _disabled=True)
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_enable_layer_module], _disabled=False)
        if args.recalibrate_weights:
            recalibrate_weights(model)
        if args.fuse_qkv:
            fuse_qkv(model, args)
        if args.clip_gelu:
            clip_gelu(model, args.clip_gelu)

    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model)
def enable_calibration(model):
    """Enable calibration of all *_quantizer modules in model."""

    logger.info("Enabling Calibration")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f"{name:80}: {module}")
def finish_calibration(model, args):
    """Disable calibration and load amax for all *_quantizer modules in model."""

    logger.info("Loading calibrated amax")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                if isinstance(module._calibrator, calib.MaxCalibrator):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax("percentile", percentile=args.percentile)
            module.enable_quant()
            module.disable_calib()
        else:
            module.enable()
    model.cuda()
    print_quant_summary(model)
def fuse_qkv(model, args):
    """Adjust quantization ranges to match an implementation where the QKV projections
    are done with a single GEMM: force the Q, K and V amaxes to their common maximum.
    """

    def fuse3(qq, qk, qv):
        for mod in [qq, qk, qv]:
            if not hasattr(mod, "_amax"):
                print("          WARNING: NO AMAX BUFFER")
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()

        amax = max(q, k, v)
        qq._amax.fill_(amax)
        qk._amax.fill_(amax)
        qv._amax.fill_(amax)
        logger.info(f"          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}")

    for name, mod in model.named_modules():
        if name.endswith(".attention.self"):
            logger.info(f"FUSE_QKV: {name:{name_width}}")
            fuse3(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
            if args.quant_per_tensor:
                fuse3(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)
def clip_gelu(model, maxval):
    """Clip activations generated by GELU to maxval when quantized,
    by clamping the amax of the following input quantizer.
    """

    for name, mod in model.named_modules():
        if name.endswith(".output.dense") and not name.endswith("attention.output.dense"):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}")
def expand_amax(model):
    """Expand per-tensor amax to be per channel, where each channel is assigned the per-tensor amax."""

    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer") and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device) * amax
            print(f"expanding {name} {amax} -> {mod._weight_quantizer._amax}")
def recalibrate_weights(model):
    """Performs max calibration on the weights and updates amax."""

    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer"):
            if not hasattr(mod._weight_quantizer, "_amax"):
                print(f"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER")
                continue

            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis)
            reduce_axis = set(range(len(mod.weight.size()))) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True).detach()
            logger.info(f"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}")
            mod._weight_quantizer._amax = amax
def print_model_summary(model, name_width=25, line_width=180, ignore=None):
    """Print model quantization configuration."""

    if ignore is None:
        ignore = []
    elif not isinstance(ignore, list):
        ignore = [ignore]

    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod, "weight"):
            continue
        name_width = max(name_width, len(name))

    for name, mod in model.named_modules():
        input_q = getattr(mod, "_input_quantizer", None)
        weight_q = getattr(mod, "_weight_quantizer", None)
        if not hasattr(mod, "weight"):
            continue
        if type(mod) in ignore:
            continue
        if [True for s in ignore if type(s) is str and s in name]:
            continue
        act_str = f"Act:{input_q.extra_repr()}"
        wgt_str = f"Wgt:{weight_q.extra_repr()}"
        s = f"{name:{name_width}} {act_str} {wgt_str}"
        if len(s) <= line_width:
            logger.info(s)
        else:
            logger.info(f"{name:{name_width}} {act_str}")
            logger.info(f"{' ':{name_width}} {wgt_str}")
def print_quant_summary(model):
    """Print summary of all quantizer modules in the model."""

    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod, pytorch_quantization.nn.TensorQuantizer):
            print(f"{name:80} {mod}")
            count += 1
    print(f"{count} TensorQuantizers found in model")
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
lowercase_ : List[Any] = getattr(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if quantizer_mod is not None:
assert hasattr(__lowerCAmelCase , __lowerCAmelCase )
setattr(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
else:
logger.warning(F'''{name} has no {quantizer}''' )
def set_quantizers(name, mod, which="both", **kwargs):
    """Set attributes on the input and/or weight quantizers of mod."""
    s = f"Warning: changing {which} quantizers of {name}"
    for k, v in kwargs.items():
        s += f" {k}={v}"
        if which in ["input", "both"]:
            set_quantizer(name, mod, "_input_quantizer", k, v)
        if which in ["weight", "both"]:
            set_quantizer(name, mod, "_weight_quantizer", k, v)
    logger.info(s)
def set_quantizer_by_name(model, names, **kwargs):
    """Apply set_quantizers to every layer whose name matches one of the given regexes."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_input_quantizer") or hasattr(mod, "_weight_quantizer"):
            for n in names:
                if re.search(n, name):
                    set_quantizers(name, mod, **kwargs)
        elif name.endswith("_quantizer"):
            for n in names:
                if re.search(n, name):
                    s = f"Warning: changing {name}"
                    for k, v in kwargs.items():
                        s += f" {k}={v}"
                        setattr(mod, k, v)
                    logger.info(s)
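
# A minimal usage sketch (not from the original file), assuming a model already
# instrumented with pytorch-quantization TensorQuantizer modules; the regex and
# the model object are placeholders:
#
#     set_quantizer_by_name(quant_model, ["pooler"], _disabled=True)  # switch off matching quantizers
#     print_quant_summary(quant_model)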
| 93
|
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from the TensorFlow checkpoint
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--gpt2_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
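
# Hypothetical command-line invocation of this script (all paths are placeholders):
#
#     python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
#         --gpt2_checkpoint_path /tmp/gpt2/model.ckpt \
#         --pytorch_dump_folder_path /tmp/gpt2-pytorch \
#         --gpt2_config_file /tmp/gpt2/config.json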
| 240
| 0
|
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechT5FeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
    import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float32 tensor as nested Python lists."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
class SpeechT5FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        do_normalize=True,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        fmin=80,
        fmax=7600,
        mel_floor=1e-10,
        return_attention_mask=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "do_normalize": self.do_normalize,
            "num_mel_bins": self.num_mel_bins,
            "hop_length": self.hop_length,
            "win_length": self.win_length,
            "win_function": self.win_function,
            "fmin": self.fmin,
            "fmax": self.fmax,
            "mel_floor": self.mel_floor,
            "return_attention_mask": self.return_attention_mask,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs

    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
class SpeechT5FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechT5FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = SpeechT5FeatureExtractionTester(self)
    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[0][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])
    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])
    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])
    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))
    def test_double_precision_pad(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    def test_call_target(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_values = feature_extractor(audio_target=speech_inputs, padding=True, return_tensors="np").input_values
        self.assertTrue(input_values.ndim == 3)
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_batch_feature_target(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )
    @require_torch
    def test_batch_feature_target_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )
    @require_torch
    def test_padding_accepts_tensors_target_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)
    def test_attention_mask_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)
    def test_attention_mask_with_truncation_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
             3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
             2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
             4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
             7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
             4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 93680))
        self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))
    def test_integration_target(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
             -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
             -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
             -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 366, 80))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
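
# A minimal usage sketch of the feature extractor exercised by these tests
# (illustrative only; the waveform is random noise, not real speech):
#
#     import numpy as np
#     from transformers import SpeechT5FeatureExtractor
#
#     extractor = SpeechT5FeatureExtractor()
#     waveform = np.random.randn(16000).astype(np.float32)  # 1 second at 16 kHz
#     inputs = extractor(waveform, sampling_rate=16000, return_tensors="pt")
#     print(inputs.input_values.shape)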
| 368
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
    import torch
if is_vision_available():
    from PIL import Image

    from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
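
# A minimal usage sketch of the image processor exercised by these tests
# (illustrative only; the input is a random uint8 array, not a real image):
#
#     import numpy as np
#     from transformers import PoolFormerImageProcessor
#
#     processor = PoolFormerImageProcessor()
#     image = np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)
#     pixel_values = processor(image, return_tensors="pt").pixel_values
#     print(pixel_values.shape)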
| 280
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}


class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
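
# A minimal usage sketch (not part of the original module): instantiating the
# config with a smaller architecture; the attribute_map makes hidden_size and
# num_attention_heads resolve to d_model and encoder_attention_heads.
#
#     config = PegasusConfig(encoder_layers=6, decoder_layers=6)
#     print(config.hidden_size, config.num_attention_heads)  # 1024 16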
| 24
|
from ...configuration_utils import PretrainedConfig
TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/tapas-base-finetuned-sqa": (
        "https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wtq": (
        "https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wikisql-supervised": (
        "https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-tabfact": (
        "https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"
    ),
}


class TapasConfig(PretrainedConfig):
    model_type = "tapas"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
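
# A minimal usage sketch (not part of the original module): a config for
# cell-selection-only fine-tuning, with aggregation disabled.
#
#     config = TapasConfig(num_aggregation_labels=0, select_one_column=True)
#     print(config.max_num_rows, config.max_num_columns)  # 64 32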
| 314
| 0
|
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(accelerator, dataset, train_idxs, valid_idxs, batch_size=16):
    # Build train/validation/test dataloaders for one cross-validation fold.
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    # New Code #
    test_predictions = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0))
        # We now need to release all our memory and get rid of the current model, optimizer, etc
        accelerator.free_memory()

    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
    main()
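
# A hypothetical launch command for this script (the filename is a placeholder):
#
#     accelerate launch cross_validation.py --num_folds 5 --mixed_precision fp16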
| 14
|
from __future__ import annotations
import queue
class TreeNode:
    def __init__(self, data):
        self.data = data
        self.right = None
        self.left = None


def build_tree() -> TreeNode:
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise RuntimeError("unreachable: every iteration either returns or enqueues new nodes")
def pre_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")
def level_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():  # consume all the nodes of the current level
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)
def pre_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")


def prompt(s: str = "", width=50, char="*") -> str:
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(prompt("Binary Tree Traversals"))

    node = build_tree()
    print(prompt("Pre Order Traversal"))
    pre_order(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal"))
    in_order(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal"))
    post_order(node)
    print(prompt() + "\n")

    print(prompt("Level Order Traversal"))
    level_order(node)
    print(prompt() + "\n")

    print(prompt("Actual Level Order Traversal"))
    level_order_actual(node)
    print("*" * 50 + "\n")

    print(prompt("Pre Order Traversal - Iteration Version"))
    pre_order_iter(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal - Iteration Version"))
    in_order_iter(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal - Iteration Version"))
    post_order_iter(node)
    print(prompt())
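
# A non-interactive sketch (not in the original demo, which reads the tree from
# stdin): building a three-node tree by hand and traversing it.
#
#     root = TreeNode(1)
#     root.left, root.right = TreeNode(2), TreeNode(3)
#     pre_order(root)   # prints 1,2,3,
#     in_order(root)    # prints 2,1,3,
#     post_order(root)  # prints 2,3,1,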
| 14
| 1
|
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 188
|
from __future__ import annotations
def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
    """
    Determine the missing carrier concentration from the mass-action law
    n * p = n_i**2. Exactly one of the three arguments must be 0; it is the
    unknown to solve for.
    """
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative in a semiconductor")
    elif hole_conc < 0:
        raise ValueError("Hole concentration cannot be negative in a semiconductor")
    elif intrinsic_conc < 0:
        raise ValueError("Intrinsic concentration cannot be negative in a semiconductor")
    elif electron_conc == 0:
        return (
            "electron_conc",
            intrinsic_conc**2 / hole_conc,
        )
    elif hole_conc == 0:
        return (
            "hole_conc",
            intrinsic_conc**2 / electron_conc,
        )
    elif intrinsic_conc == 0:
        return (
            "intrinsic_conc",
            (electron_conc * hole_conc) ** 0.5,
        )
    else:
        return (-1, -1)
if __name__ == "__main__":
    import doctest

    doctest.testmod()
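
# Illustrative example (not in the original file): with n_i = 1000 and p = 100,
# the mass-action law n * p = n_i**2 gives n = 1000**2 / 100 = 10000:
#
#     carrier_concentration(electron_conc=0, hole_conc=100, intrinsic_conc=1000)
#     # -> ('electron_conc', 10000.0)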
| 188
| 1
|
"""simple docstring"""
def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False

    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    # Initialize path with -1, indicating that we have not visited the vertices yet
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
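

if __name__ == "__main__":
    # Illustrative usage (not in the original file): a 5-vertex adjacency matrix
    # that contains a Hamiltonian cycle.
    example_graph = [
        [0, 1, 0, 1, 0],
        [1, 0, 1, 1, 1],
        [0, 1, 0, 0, 1],
        [1, 1, 0, 0, 1],
        [0, 1, 1, 1, 0],
    ]
    print(hamilton_cycle(example_graph))  # [0, 1, 2, 4, 3, 0]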
| 312
|
"""simple docstring"""
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
    ArrowFormatter,
    CustomFormatter,
    Formatter,
    PandasFormatter,
    PythonFormatter,
    TensorFormatter,
    format_table,
    query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}


def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
) -> None:
    """Register a Formatter object using a name and optional aliases."""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
) -> None:
    """Register an unavailable Formatter object using a name and optional aliases."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['''python'''])
_register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow'''])
_register_formatter(NumpyFormatter, '''numpy''', aliases=['''np'''])
_register_formatter(PandasFormatter, '''pandas''', aliases=['''pd'''])
_register_formatter(CustomFormatter, '''custom''')
if config.TORCH_AVAILABLE:
    from .torch_formatter import TorchFormatter

    _register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
    _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])

if config.TF_AVAILABLE:
    from .tf_formatter import TFFormatter

    _register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
    _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])

if config.JAX_AVAILABLE:
    from .jax_formatter import JaxFormatter

    _register_formatter(JaxFormatter, "jax", aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
    _register_unavailable_formatter(_jax_error, "jax", aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """If the given format type is a known alias, return its main type name; otherwise return it unchanged."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """Factory function to get a Formatter given its type name and keyword arguments."""
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'"
        )
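
# A usage sketch of the factory above (internal datasets API; illustrative only):
#
#     formatter = get_formatter("np")  # the "np" alias resolves to the NumpyFormatter
#     # get_formatter("torch") raises the registered error if PyTorch is missing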
| 312
| 1
|
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1000000, n_limit: int = 10) -> int:
    """
    Count the hollow square lamina sizes t <= t_limit that can be formed in
    n distinct ways for 1 <= n <= n_limit (Project Euler 174).
    """
    count: defaultdict = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1

        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)
if __name__ == "__main__":
    print(f"{solution() = }")
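
# Illustrative check (not in the original file): with t_limit=8 the only lamina
# is a 3x3 outer square with a 1x1 hole (3*3 - 1*1 = 8 tiles), so exactly one
# size t is formable in 1..10 ways:
#
#     solution(t_limit=8)  # -> 1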
| 106
|
"""simple docstring"""
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")
class LRUCache(Generic[T]):
    """
    Page replacement via Least Recently Used (LRU): discard the least recently
    referenced key when the cache is full.
    """

    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        """Create an empty LRU cache with a maximum capacity of n (unbounded if n is falsy)."""
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        """Reference key x: insert it (evicting the LRU key if full) or move it to the front."""
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        """Print the cache contents from most to least recently used."""
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    lru_cache: LRUCache[str | int] = LRUCache(4)
    lru_cache.refer("A")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("A")
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()

    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 106
| 1
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    '''simple docstring'''
    for attribute in key.split('''.'''):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
        F""" {value.shape} for {full_name}"""
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""")
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    '''simple docstring'''
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == '''group''',)
            is_used = True
        elif name.split('''.''')[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('''w2v_model.''')[-1] == name.split('''.''')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('''.''')[-2]
                        mapped_key = mapped_key.replace('''*''', layer_index)
                    if "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "bias" in name:
                        weight_type = '''bias'''
                    elif "weight" in name:
                        weight_type = '''weight'''
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(F"""Unused weights: {unused_weights}""")
    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    '''simple docstring'''
    name = full_name.split('''conv_layers.''')[-1]
    items = name.split('''.''')
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F"""Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    '''simple docstring'''
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    '''simple docstring'''
    with open(dict_path, '''r''', encoding='''utf-8''') as f:
        lines = f.readlines()
        words = [line.split(''' ''')[0] for line in lines]
    num_words = len(words)
    vocab_dict = {
        '''<s>''': 0,
        '''<pad>''': 1,
        '''</s>''': 2,
        '''<unk>''': 3,
    }
    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
@torch.no_grad()
def convert_wavaveca_checkpoint(checkpoint_path, pytorch_dump_folder_path, dict_path, encoder_config_path, decoder_config_path, vocab_size, num_decoder_layers,):
    '''simple docstring'''
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path)
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True)
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=True,)
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''')[:-1])})
    model = model[0].eval()
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    projection_layer = recursively_load_weights_wavaveca(model.encoder, hf_encoder)
    hf_decoder = SpeechaTextaForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    # set output linear layer
    unexpected_keys.remove('''embed_out''')
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())
    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(F"""The following keys are missing when loading the decoder weights: {missing_keys}""")
    logger.warning(F"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""")
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False
    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)
    vocab_dict = create_vocab_dict(dict_path)
    with open(os.path.join(pytorch_dump_folder_path, '''vocab.json'''), '''w''') as fp:
        json.dump(vocab_dict, fp)
    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path, '''vocab.json'''))
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    config = hf_wavavec.config.to_dict()
    config['''pad_token_id'''] = tokenizer.pad_token_id
    config['''bos_token_id'''] = tokenizer.bos_token_id
    config['''eos_token_id'''] = tokenizer.eos_token_id
    config['''tokenizer_class'''] = '''speech_to_text_2'''
    config['''feature_extractor_type'''] = '''wav2vec2'''
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-large-lv60""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/s2t-small-mustc-en-fr-st""",
type=str,
help="""Path to hf decoder s2t checkpoint config""",
)
parser.add_argument("""--vocab_size""", default=10224, type=int, help="""Vocab size of decoder""")
parser.add_argument("""--num_decoder_layers""", default=7, type=int, help="""Number of decoder layers""")
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
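# Example invocation of the conversion entry point above (the script filename
# and all paths are placeholders, not from the original file; fairseq and a
# fine-tuned wav2vec2 seq2seq checkpoint are assumed to be available):
#
#   python convert_checkpoint.py \
#       --checkpoint_path /path/to/checkpoint.pt \
#       --dict_path /path/to/dict.txt \
#       --pytorch_dump_folder_path ./converted-model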
| 291
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
"""config""": [
"""EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
"""OnnxConfig""",
"""OnnxConfigWithPast""",
"""OnnxSeq2SeqConfigWithPast""",
"""PatchingSpec""",
],
"""convert""": ["""export""", """validate_model_outputs"""],
"""features""": ["""FeaturesManager"""],
"""utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
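# How the lazy module behaves at runtime (an illustrative sketch, not part of
# the original file): importing the package is cheap because only the
# _import_structure dict is read; the first attribute access triggers the real
# submodule import, e.g.
#
#   from transformers import onnx
#   onnx.OnnxConfig   # transformers.onnx.config is imported on this access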
| 291
| 1
|
"""simple docstring"""
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = '.'
if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, 'utils/documentation_tests.txt')
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = '\n'.join(non_existent_paths)
raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
if all_paths != sorted(all_paths):
raise ValueError('Files in `utils/documentation_tests.txt` are not in alphabetical order.')
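# An illustrative excerpt of what `utils/documentation_tests.txt` is expected
# to contain (hypothetical entries, shown only to make the two checks above
# concrete -- every path must exist and the list must be alphabetically sorted):
#
#   docs/source/en/quicktour.md
#   src/transformers/models/bert/modeling_bert.py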
| 290
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    """simple docstring"""
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ['', 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
        image_processor_map = {
            'do_resize': True,
            'size': 20,
            'do_center_crop': True,
            'crop_size': 18,
            'do_normalize': True,
            'image_mean': [0.48145466, 0.4578275, 0.40821073],
            'image_std': [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token='!', **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token='!', **kwargs)
    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False)
        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)
        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)
        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'lower newer'
        encoded_processor = processor(text=input_str, return_tensors='np')
        encoded_tok = tokenizer(input_str, return_tensors='np')
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ['input_ids', 'attention_mask', 'pixel_values'])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_processor_with_text_list(self):
        model_name = 'google/owlvit-base-patch32'
        processor = OwlViTProcessor.from_pretrained(model_name)
        input_text = ['cat', 'nasa badge']
        inputs = processor(text=input_text)
        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ['input_ids', 'attention_mask'])
        self.assertEqual(inputs['input_ids'].shape, (2, seq_length))
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_processor_with_nested_text_list(self):
        model_name = 'google/owlvit-base-patch32'
        processor = OwlViTProcessor.from_pretrained(model_name)
        input_texts = [['cat', 'nasa badge'], ['person']]
        inputs = processor(text=input_texts)
        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max([len(texts) for texts in input_texts])
        self.assertListEqual(list(inputs.keys()), ['input_ids', 'attention_mask'])
        self.assertEqual(inputs['input_ids'].shape, (batch_size * num_max_text_queries, seq_length))
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_processor_case(self):
        model_name = 'google/owlvit-base-patch32'
        processor = OwlViTProcessor.from_pretrained(model_name)
        input_texts = ['cat', 'nasa badge']
        inputs = processor(text=input_texts)
        seq_length = 16
        input_ids = inputs['input_ids']
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]
        self.assertListEqual(list(inputs.keys()), ['input_ids', 'attention_mask'])
        self.assertEqual(inputs['input_ids'].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])
    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()
        inputs = processor(images=image_input, query_images=query_input)
        self.assertListEqual(list(inputs.keys()), ['query_pixel_values', 'pixel_values'])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
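# A minimal usage sketch of the processor outside the test harness (the model
# id is a real hub checkpoint; the call mirrors the tests above and is
# illustrative rather than part of the original file):
#
#   processor = OwlViTProcessor.from_pretrained('google/owlvit-base-patch32')
#   inputs = processor(text=[['cat', 'nasa badge']], images=image, return_tensors='pt')
#   # inputs contains 'input_ids', 'attention_mask' and 'pixel_values'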
| 280
| 0
|
'''simple docstring'''
from manim import *
class Stage2(Scene):
    def construct(self):
        '''simple docstring'''
_UpperCAmelCase : Optional[int] = Rectangle(height=0.5 , width=0.5 )
_UpperCAmelCase : Dict = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
_UpperCAmelCase : Dict = [mem.copy() for i in range(6 )]
_UpperCAmelCase : int = [mem.copy() for i in range(6 )]
_UpperCAmelCase : List[Any] = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : List[str] = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : str = VGroup(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : str = Text("CPU" , font_size=24 )
_UpperCAmelCase : Optional[int] = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCamelCase__ )
_UpperCAmelCase : str = [mem.copy() for i in range(4 )]
_UpperCAmelCase : Optional[int] = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : Dict = Text("GPU" , font_size=24 )
_UpperCAmelCase : List[Any] = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
gpu.move_to([-1, -1, 0] )
self.add(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = [mem.copy() for i in range(6 )]
_UpperCAmelCase : Tuple = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : Optional[int] = Text("Model" , font_size=24 )
_UpperCAmelCase : int = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
model.move_to([3, -1.0, 0] )
self.add(lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = []
for i, rect in enumerate(lowerCamelCase__ ):
rect.set_stroke(lowerCamelCase__ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
_UpperCAmelCase : Optional[int] = Rectangle(height=0.4_6 / 4 , width=0.4_6 / 3 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase__ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=lowerCamelCase__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=lowerCamelCase__ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=lowerCamelCase__ , buff=0.0 )
self.add(lowerCamelCase__ )
cpu_targs.append(lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = [mem.copy() for i in range(6 )]
_UpperCAmelCase : Tuple = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : Optional[int] = Text("Loaded Checkpoint" , font_size=24 )
_UpperCAmelCase : Optional[int] = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , aligned_edge=lowerCamelCase__ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
_UpperCAmelCase : Tuple = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_UpperCAmelCase : Optional[Any] = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(lowerCamelCase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
_UpperCAmelCase : Union[str, Any] = MarkupText(
F"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase__ ) , Write(lowerCamelCase__ ) )
self.play(Write(lowerCamelCase__ , run_time=1 ) , Create(lowerCamelCase__ , run_time=1 ) )
_UpperCAmelCase : Any = []
_UpperCAmelCase : str = []
for i, rect in enumerate(lowerCamelCase__ ):
_UpperCAmelCase : Dict = fill.copy().set_fill(lowerCamelCase__ , opacity=0.7 )
target.move_to(lowerCamelCase__ )
first_animations.append(GrowFromCenter(lowerCamelCase__ , run_time=1 ) )
_UpperCAmelCase : Dict = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(lowerCamelCase__ , run_time=1.5 ) )
self.play(*lowerCamelCase__ )
self.play(*lowerCamelCase__ )
self.wait()
| 352
|
'''simple docstring'''
import pytest
DATASET_LOADING_SCRIPT_NAME = '__dummy_dataset1__'
DATASET_LOADING_SCRIPT_CODE = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n    def _info(self):\n        features = datasets.Features(\n            {\n                "tokens": datasets.Sequence(datasets.Value("string")),\n                "ner_tags": datasets.Sequence(\n                    datasets.features.ClassLabel(\n                        names=[\n                            "O",\n                            "B-PER",\n                            "I-PER",\n                            "B-ORG",\n                            "I-ORG",\n                            "B-LOC",\n                            "I-LOC",\n                        ]\n                    )\n                ),\n                "langs": datasets.Sequence(datasets.Value("string")),\n                "spans": datasets.Sequence(datasets.Value("string")),\n            }\n        )\n        return datasets.DatasetInfo(features=features)\n\n    def _split_generators(self, dl_manager):\n        dl_path = dl_manager.download(URLS)\n        return [\n            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n        ]\n\n    def _generate_examples(self, filepath):\n        with open(filepath, "r", encoding="utf-8") as f:\n            for i, line in enumerate(f):\n                yield i, json.loads(line)\n'
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / F"""{script_name}.py"""
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
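# A hedged usage sketch (the test name is illustrative, not from the original
# conftest): a test can point load_dataset at the directory fixture to run the
# packaged dummy script end to end:
#
#   def test_load_dummy_dataset(dataset_loading_script_dir):
#       from datasets import load_dataset
#       ds = load_dataset(dataset_loading_script_dir, split="train")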
| 322
| 0
|
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''')
    datasets = DatasetDict(
        {
            '''train''': dataset['''train'''].select(train_idxs),
            '''validation''': dataset['''train'''].select(valid_idxs),
            '''test''': dataset['''validation'''],
        })
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=True, max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['''idx''', '''sentence1''', '''sentence2'''],)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''', '''labels''')
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding='''longest''', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='''pt''',)
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    test_dataloader = DataLoader(
        tokenized_datasets['''test'''], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    """simple docstring"""
    test_references = []
    # Download the dataset
    datasets = load_dataset('''glue''', '''mrpc''')
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''])
    seed = int(config['''seed'''])
    batch_size = int(config['''batch_size'''])
    metric = evaluate.load('''glue''', '''mrpc''')
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    set_seed(seed)
    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets['''train'''].num_rows), datasets['''train''']['''label'''])
    test_predictions = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator, datasets, train_idxs, valid_idxs,)
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''', return_dict=True)
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,)
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch['''labels''']))
                metric.add_batch(
                    predictions=predictions, references=references,)
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"""epoch {epoch}:""", eval_metric)
        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch['''labels''']))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())
        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0))
        # We now need to release all our memory and get rid of the current model, optimizer, etc
        accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print('''Average test metrics from all folds:''', test_metric)
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser(description='''Simple example of training script.''')
    parser.add_argument(
        '''--mixed_precision''', type=str, default=None, choices=['''no''', '''fp16''', '''bf16''', '''fp8'''], help='''Whether to use mixed precision. Choose'''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
        '''and an Nvidia Ampere GPU.''',)
    parser.add_argument('''--cpu''', action='''store_true''', help='''If passed, will train on the CPU.''')
    # New Code #
    parser.add_argument('''--num_folds''', type=int, default=3, help='''The number of splits to perform across the dataset''')
    args = parser.parse_args()
    config = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
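# Example launch commands (illustrative, following the accelerate examples
# README rather than anything stated in this file; the script filename is a
# placeholder):
#
#   accelerate config                   # answer the interactive questions once
#   accelerate launch cross_validation.py --num_folds 5 --mixed_precision fp16
#
# or, on a single device without the launcher:
#
#   python cross_validation.py --cpu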
| 14
|
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 512,
"""facebook/dpr-ctx_encoder-multiset-base""": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-question_encoder-single-nq-base""": 512,
"""facebook/dpr-question_encoder-multiset-base""": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-reader-single-nq-base""": 512,
"""facebook/dpr-reader-multiset-base""": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    """DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
DPRReaderOutput = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])
CUSTOM_DPR_READER_DOCSTRING = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    '''simple docstring'''
    def __call__(self, questions, titles: Optional[str] = None, texts: Optional[str] = None, padding: Union[bool, str] = False, truncation: Union[bool, str] = False, max_length: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_attention_mask: Optional[bool] = None, **kwargs,) -> BatchEncoding:
        '''simple docstring'''
        if titles is None and texts is None:
            return super().__call__(
                questions, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs,)
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions, text_pair, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs,)
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts), f"""There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."""
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)['''input_ids''']
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)['''input_ids''']
        encoded_inputs = {
            '''input_ids''': [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs['''attention_mask'''] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(self, reader_input: BatchEncoding, reader_output: DPRReaderOutput, num_spans: int = 16, max_answer_length: int = 64, num_spans_per_passage: int = 4,) -> List[DPRSpanPrediction]:
        '''simple docstring'''
        input_ids = reader_input['''input_ids''']
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=max_answer_length, top_spans=num_spans_per_passage,)
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=doc_id, start_index=start_index, end_index=end_index, text=self.decode(sequence_ids[start_index : end_index + 1]),))
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(self, start_logits: List[int], end_logits: List[int], max_answer_length: int, top_spans: int,) -> List[DPRSpanPrediction]:
        '''simple docstring'''
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"""Wrong span indices: [{start_index}:{end_index}]"""
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"""Span is too long: {length} > {max_answer_length}"""
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ['''input_ids''', '''attention_mask''']
    slow_tokenizer_class = DPRReaderTokenizer
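# A hedged usage sketch of the reader tokenizer (mirrors the docstring above;
# the checkpoint id is a real hub checkpoint, but the call is illustrative
# rather than part of this file):
#
#   tokenizer = DPRReaderTokenizerFast.from_pretrained('facebook/dpr-reader-single-nq-base')
#   encoded = tokenizer(questions='What is love?',
#                       titles=('Haddaway',),
#                       texts=('What Is Love is a song recorded by ...',),
#                       return_tensors='pt')
#   # after running the DPR reader model on `encoded`, the best answer spans
#   # can be recovered with tokenizer.decode_best_spans(encoded, model_outputs)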
| 14
| 1
|
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class LoggingTest(unittest.TestCase):
    """simple docstring"""
    def test_set_level(self):
        logger = logging.get_logger()
        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()
        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())
        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())
        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())
        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())
        # restore to the original level
        logging.set_verbosity(level_origin)
    def test_integration(self):
        level_origin = logging.get_verbosity()
        logger = logging.get_logger('''transformers.models.bart.tokenization_bart''')
        msg = '''Testing 1, 2, 3'''
        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + '''\n''')
        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()
        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, '''''')
        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + '''\n''')
        # restore to the original level
        logging.set_verbosity(level_origin)
    @mockenv(TRANSFORMERS_VERBOSITY='''error''')
    def test_env_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        _ = logging.get_logger('''transformers.models.bart.tokenization_bart''')
        env_level_str = os.getenv('''TRANSFORMERS_VERBOSITY''', None)
        env_level = logging.log_levels[env_level_str]
        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level, current_level, F"""TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}""",)
        # restore to the original level
        os.environ['''TRANSFORMERS_VERBOSITY'''] = ''''''
        transformers.utils.logging._reset_library_root_logger()
    @mockenv(TRANSFORMERS_VERBOSITY='''super-error''')
    def test_env_invalid_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger('''transformers.models.bart.tokenization_bart''')
        self.assertIn('''Unknown option TRANSFORMERS_VERBOSITY=super-error''', cl.out)
        # no need to restore as nothing was changed
    def test_advisory_warnings(self):
        # testing `logger.warning_advice()`
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.get_logger('''transformers.models.bart.tokenization_bart''')
        msg = '''Testing 1, 2, 3'''
        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''1'''):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, '''''')
        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=''''''):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + '''\n''')
def test_set_progress_bar_enabled():
    """simple docstring"""
    disable_progress_bar()
    assert are_progress_bars_disabled()
    enable_progress_bar()
    assert not are_progress_bars_disabled()
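# Shell-level counterpart of the env-var tests above (illustrative commands,
# not part of the original test file):
#
#   TRANSFORMERS_VERBOSITY=error python my_script.py          # only errors are logged
#   TRANSFORMERS_NO_ADVISORY_WARNINGS=1 python my_script.py   # warning_advice() is silenced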
| 361
|
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        'num_images_per_prompt',
        'latents',
        'callback',
        'callback_steps',
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=('''DownBlock2D''', '''AttnDownBlock2D'''), up_block_types=('''AttnUpBlock2D''', '''UpBlock2D'''),)
        scheduler = DDIMScheduler()
        components = {'''unet''': unet, '''scheduler''': scheduler}
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            '''batch_size''': 1,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs
    def test_inference(self):
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)
    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)
    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests ( unittest.TestCase ):
"""simple docstring"""
    def test_inference_cifar10( self ):
        model_id = '''google/ddpm-cifar10-32'''
        unet = UNetaDModel.from_pretrained(model_id )
        scheduler = DDIMScheduler()
        ddim = DDIMPipeline(unet=unet , scheduler=scheduler )
        ddim.to(torch_device )
        ddim.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = ddim(generator=generator , eta=0.0 , output_type='''numpy''' ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1_7_2_3, 0.1_6_1_7, 0.1_6_0_0, 0.1_6_2_6, 0.1_4_9_7, 0.1_5_1_3, 0.1_5_0_5, 0.1_4_4_2, 0.1_4_5_3] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_inference_ema_bedroom( self ):
        model_id = '''google/ddpm-ema-bedroom-256'''
        unet = UNetaDModel.from_pretrained(model_id )
        scheduler = DDIMScheduler.from_pretrained(model_id )
        ddpm = DDIMPipeline(unet=unet , scheduler=scheduler )
        ddpm.to(torch_device )
        ddpm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = ddpm(generator=generator , output_type='''numpy''' ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0_0_6_0, 0.0_2_0_1, 0.0_3_4_4, 0.0_0_2_4, 0.0_0_1_8, 0.0_0_0_2, 0.0_0_2_2, 0.0_0_0_0, 0.0_0_6_9] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
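# --- Added usage sketch (illustrative, not part of the original test module).
# A minimal end-to-end run of the pipeline exercised by the tests above; the
# checkpoint matches the one used in test_inference_cifar10. Kept as comments
# so importing this test file does not trigger a model download.
#
#     from diffusers import DDIMPipeline
#     pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#     image = pipe(num_inference_steps=50, eta=0.0).images[0]
#     image.save("ddim_sample.png")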
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
__a :Dict = logging.get_logger(__name__)
__a :str = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__a :List[str] = {
'vocab_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'
),
},
}
__a :List[Any] = {
'yjernite/retribert-base-uncased': 512,
}
__a :Dict = {
'yjernite/retribert-base-uncased': {'do_lower_case': True},
}
class RetriBertTokenizerFast ( PreTrainedTokenizerFast ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
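# --- Added usage sketch (illustrative, not part of the original module) ---
#
#     tok = RetriBertTokenizerFast.from_pretrained("yjernite/retribert-base-uncased")
#     enc = tok("a query", "a passage")
#     enc["token_type_ids"]  # 0s over [CLS] query [SEP], 1s over passage [SEP]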
def base16_encode( data : bytes ) -> str:
    """Encode raw bytes as an uppercase base16 (hex) string."""
    return "".join([hex(byte )[2:].zfill(2 ).upper() for byte in list(data )] )
def base16_decode( data : str ) -> bytes:
    """Decode an uppercase base16 (hex) string back into bytes."""
    if (len(data ) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits." )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data ) <= set("0123456789ABCDEF" ):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(data ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
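# --- Added usage sketch (not part of the original module): round-trip check
# through the two helpers defined above.
if __name__ == "__main__":
    _payload = b"Hello World!"
    _encoded = base16_encode(_payload)
    assert _encoded == "48656C6C6F20576F726C6421"
    assert base16_decode(_encoded) == _payload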
def SCREAMING_SNAKE_CASE ( density : float , bulk_modulus : float ) -> float:
    if density <= 0:
        raise ValueError('''Impossible fluid density''' )
    if bulk_modulus <= 0:
        raise ValueError('''Impossible bulk modulus''' )
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
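# --- Added usage sketch (not part of the original module). The water figures
# are rough textbook values: bulk modulus ~2.15 GPa and density ~998 kg/m^3
# give sqrt(2.15e9 / 998) ~ 1468 m/s.
if __name__ == "__main__":
    print(f"{SCREAMING_SNAKE_CASE(density=998.0, bulk_modulus=2.15e9):.0f} m/s")  # -> 1468 m/s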
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def convert_tax_checkpoint_to_flax( tax_checkpoint_path : str , config_name : str , flax_dump_folder_path : str ) -> None:
    config = AutoConfig.from_pretrained(config_name )
    flax_model = FlaxAutoModelForSeqaSeqLM.from_config(config=config )
    tax_model = checkpoints.load_tax_checkpoint(tax_checkpoint_path )
    split_mlp_wi = '''wi_0''' in tax_model['''target''']['''encoder''']['''layers_0''']['''mlp''']
    if config.model_type == "t5":
        encoder_attn_name = '''SelfAttention'''
    elif config.model_type == "longt5" and config.encoder_attention_type == "local":
        encoder_attn_name = '''LocalSelfAttention'''
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        encoder_attn_name = '''TransientGlobalSelfAttention'''
    else:
        raise ValueError(
            '''Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`'''
            ''' attribute with a value from ['local', 'transient-global].''' )
# Encoder
for layer_index in range(config.num_layers ):
        layer_name = f'layers_{str(layer_index )}'
# Self-Attention
UpperCAmelCase_ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel''']
UpperCAmelCase_ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel''']
UpperCAmelCase_ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel''']
UpperCAmelCase_ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel''']
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
UpperCAmelCase_ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale''']
# Layer Normalization
UpperCAmelCase_ = tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale''']
if split_mlp_wi:
UpperCAmelCase_ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
UpperCAmelCase_ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
UpperCAmelCase_ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
UpperCAmelCase_ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
UpperCAmelCase_ = tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
        flax_model_encoder_layer_block = flax_model.params['''encoder''']['''block'''][str(layer_index )]['''layer''']
UpperCAmelCase_ = tax_attention_key
UpperCAmelCase_ = tax_attention_out
UpperCAmelCase_ = tax_attention_query
UpperCAmelCase_ = tax_attention_value
UpperCAmelCase_ = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
UpperCAmelCase_ = tax_global_layer_norm
if split_mlp_wi:
UpperCAmelCase_ = tax_mlp_wi_a
UpperCAmelCase_ = tax_mlp_wi_a
else:
UpperCAmelCase_ = tax_mlp_wi
UpperCAmelCase_ = tax_mlp_wo
UpperCAmelCase_ = tax_mlp_layer_norm
        flax_model.params['''encoder''']['''block'''][str(layer_index )]['''layer'''] = flax_model_encoder_layer_block
# Only for layer 0:
UpperCAmelCase_ = tax_model['''target''']['''encoder''']['''relpos_bias''']['''rel_embedding'''].T
UpperCAmelCase_ = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
UpperCAmelCase_ = tax_model['''target''']['''encoder''']['''side_relpos_bias''']['''rel_embedding'''].T
UpperCAmelCase_ = tax_encoder_global_rel_embedding
# Assigning
UpperCAmelCase_ = tax_model['''target''']['''encoder''']['''encoder_norm''']['''scale''']
UpperCAmelCase_ = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
        layer_name = f'layers_{str(layer_index )}'
# Self-Attention
UpperCAmelCase_ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel''']
UpperCAmelCase_ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel''']
UpperCAmelCase_ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel''']
UpperCAmelCase_ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel''']
# Layer Normalization
UpperCAmelCase_ = tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm'''][
'''scale'''
]
# Encoder-Decoder-Attention
UpperCAmelCase_ = tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention''']
UpperCAmelCase_ = tax_enc_dec_attention_module['''key''']['''kernel''']
UpperCAmelCase_ = tax_enc_dec_attention_module['''out''']['''kernel''']
UpperCAmelCase_ = tax_enc_dec_attention_module['''query''']['''kernel''']
UpperCAmelCase_ = tax_enc_dec_attention_module['''value''']['''kernel''']
# Layer Normalization
UpperCAmelCase_ = tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale''']
# MLP
if split_mlp_wi:
UpperCAmelCase_ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
UpperCAmelCase_ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
UpperCAmelCase_ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
UpperCAmelCase_ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
UpperCAmelCase_ = tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
        flax_model_decoder_layer_block = flax_model.params['''decoder''']['''block'''][str(layer_index )]['''layer''']
UpperCAmelCase_ = tax_attention_key
UpperCAmelCase_ = tax_attention_out
UpperCAmelCase_ = tax_attention_query
UpperCAmelCase_ = tax_attention_value
UpperCAmelCase_ = tax_pre_attention_layer_norm
UpperCAmelCase_ = tax_enc_dec_attention_key
UpperCAmelCase_ = tax_enc_dec_attention_out
UpperCAmelCase_ = tax_enc_dec_attention_query
UpperCAmelCase_ = tax_enc_dec_attention_value
UpperCAmelCase_ = tax_cross_layer_norm
if split_mlp_wi:
UpperCAmelCase_ = tax_mlp_wi_a
UpperCAmelCase_ = tax_mlp_wi_a
else:
UpperCAmelCase_ = tax_mlp_wi
UpperCAmelCase_ = tax_mlp_wo
UpperCAmelCase_ = txa_mlp_layer_norm
        flax_model.params['''decoder''']['''block'''][str(layer_index )]['''layer'''] = flax_model_decoder_layer_block
# Decoder Normalization
UpperCAmelCase_ = tax_model['''target''']['''decoder''']['''decoder_norm''']['''scale''']
UpperCAmelCase_ = txa_decoder_norm
# Only for layer 0:
UpperCAmelCase_ = tax_model['''target''']['''decoder''']['''relpos_bias''']['''rel_embedding'''].T
UpperCAmelCase_ = tax_decoder_rel_embedding
# Token Embeddings
UpperCAmelCase_ = tax_model['''target''']['''token_embedder''']['''embedding''']
UpperCAmelCase_ = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
UpperCAmelCase_ = tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel''']
    flax_model.save_pretrained(flax_dump_folder_path )
print('''T5X Model was sucessfully converted!''' )
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path the T5X checkpoint.'
)
parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
parser.add_argument(
'--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
)
_lowerCamelCase = parser.parse_args()
    convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
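# --- Added usage sketch (illustrative; the script file name and the paths are
# placeholders, and the collapsed variable names inside the per-layer loops
# above still need restoring before the conversion runs end to end) ---
#
#     python convert_t5x_checkpoint_to_flax.py \
#         --t5x_checkpoint_path /path/to/t5x_checkpoint \
#         --config_name google/t5-v1_1-small \
#         --flax_dump_folder_path ./flax_model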
"""simple docstring"""
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search( left : int , right : int , array : list[int] , target : int ) -> int:
    for i in range(left , right ):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search( array : list[int] , target : int ) -> int:
    left = 0
    right = len(array )
    while left <= right:
        if right - left < precision:
            return lin_search(left , right , array , target )
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1
def rec_ternary_search( left : int , right : int , array : list[int] , target : int ) -> int:
    if left < right:
        if right - left < precision:
            return lin_search(left , right , array , target )
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left , one_third - 1 , array , target )
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1 , right , array , target )
        else:
            return rec_ternary_search(one_third + 1 , two_third - 1 , array , target )
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("""Enter numbers separated by comma:\n""").strip()
    collection = [int(item.strip()) for item in user_input.split(""",""")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("""Enter the number to be found in the list:\n""").strip())
    result_ite = ite_ternary_search(collection, target)
    result_rec = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result_ite != -1:
        print(f"""Iterative search: {target} found at positions: {result_ite}""")
        print(f"""Recursive search: {target} found at positions: {result_rec}""")
    else:
        print("""Not found""")
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class __magic_name__ ( unittest.TestCase ):
'''simple docstring'''
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    def get_test_pipeline( self , model , tokenizer , processor ):
        """simple docstring"""
        generator = TextaTextGenerationPipeline(model=model , tokenizer=tokenizer )
        return generator, ["Something to write", "Something else"]
    def run_pipeline_test( self , generator , _ ):
        """simple docstring"""
        outputs = generator("""Something there""" )
        self.assertEqual(outputs , [{"""generated_text""": ANY(str )}] )
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) )
        outputs = generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=True )
        self.assertEqual(
            outputs , [
                [{"""generated_text""": ANY(str )}, {"""generated_text""": ANY(str )}],
                [{"""generated_text""": ANY(str )}, {"""generated_text""": ANY(str )}],
            ] , )
        outputs = generator(
            ["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=True )
        self.assertEqual(
            outputs , [
                [{"""generated_text""": ANY(str )}, {"""generated_text""": ANY(str )}],
                [{"""generated_text""": ANY(str )}, {"""generated_text""": ANY(str )}],
            ] , )
        with self.assertRaises(ValueError ):
            generator(4 )
    @require_torch
    def test_small_model_pt( self ):
        """simple docstring"""
        generator = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""pt""" )
        # do_sample=False necessary for reproducibility
        outputs = generator("""Something there""" , do_sample=False )
        self.assertEqual(outputs , [{"""generated_text""": """"""}] )
        num_return_sequences = 3
        outputs = generator(
            """Something there""" , num_return_sequences=num_return_sequences , num_beams=num_return_sequences , )
        target_outputs = [
            {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""},
            {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""},
            {"""generated_text""": """"""},
        ]
        self.assertEqual(outputs , target_outputs )
        outputs = generator("""This is a test""" , do_sample=True , num_return_sequences=2 , return_tensors=True )
        self.assertEqual(
            outputs , [
                {"""generated_token_ids""": ANY(torch.Tensor )},
                {"""generated_token_ids""": ANY(torch.Tensor )},
            ] , )
        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = """<pad>"""
        outputs = generator(
            ["""This is a test""", """This is a second test"""] , do_sample=True , num_return_sequences=2 , batch_size=2 , return_tensors=True , )
        self.assertEqual(
            outputs , [
                [
                    {"""generated_token_ids""": ANY(torch.Tensor )},
                    {"""generated_token_ids""": ANY(torch.Tensor )},
                ],
                [
                    {"""generated_token_ids""": ANY(torch.Tensor )},
                    {"""generated_token_ids""": ANY(torch.Tensor )},
                ],
            ] , )
    @require_tf
    def test_small_model_tf( self ):
        """simple docstring"""
        generator = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""tf""" )
        # do_sample=False necessary for reproducibility
        outputs = generator("""Something there""" , do_sample=False )
        self.assertEqual(outputs , [{"""generated_text""": """"""}] )
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__lowerCAmelCase : Dict =logging.get_logger(__name__)
if is_vision_available():
import PIL
class UpperCAmelCase ( BaseImageProcessor ):
    model_input_names = ["""pixel_values"""]
    def __init__( self , do_resize : bool = True , size : Dict[str, int] = None , resample : PILImageResampling = PILImageResampling.BICUBIC , do_center_crop : bool = True , crop_size : Dict[str, int] = None , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 2_55 , do_normalize : bool = True , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , do_convert_rgb : bool = True , **kwargs , )-> None:
        super().__init__(**kwargs )
        size = size if size is not None else {"shortest_edge": 2_24}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
        crop_size = get_size_dict(crop_size , default_to_square=True , param_name="crop_size" )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize( self , image : np.ndarray , size : Dict[str, int] , resample : PILImageResampling = PILImageResampling.BICUBIC , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , )-> np.ndarray:
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
        output_size = get_resize_output_image_size(image , size=size["shortest_edge"] , default_to_square=False )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image : np.ndarray , size : Dict[str, int] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , )-> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
        return center_crop(image , size=(size["height"], size["width"]) , data_format=data_format , **kwargs )
    def rescale( self , image : np.ndarray , scale : Union[int, float] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , )-> np.ndarray:
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image : np.ndarray , mean : Union[float, List[float]] , std : Union[float, List[float]] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , )-> np.ndarray:
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images : ImageInput , do_resize : bool = None , size : Dict[str, int] = None , resample : PILImageResampling = None , do_center_crop : bool = None , crop_size : int = None , do_rescale : bool = None , rescale_factor : float = None , do_normalize : bool = None , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , do_convert_rgb : bool = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : Optional[ChannelDimension] = ChannelDimension.FIRST , **kwargs , )-> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size , param_name="size" , default_to_square=False )
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="crop_size" , default_to_square=True )
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image ) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
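# --- Added usage sketch (illustrative, not part of the original module). The
# class follows the CLIP preprocessing recipe (convert to RGB -> resize ->
# center crop -> rescale -> normalize); the checkpoint name is just an example.
#
#     processor = UpperCAmelCase.from_pretrained("openai/clip-vit-base-patch32")
#     batch = processor(images=pil_image, return_tensors="pt")
#     batch["pixel_values"].shape  # torch.Size([1, 3, 224, 224])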
'''simple docstring'''
def solution( limit : int = 1_00_00_00 ) -> int:
    primes = set(range(3 , limit , 2 ) )
    primes.add(2 )
    for p in range(3 , limit , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , limit , p ) ) )
    phi = [float(n ) for n in range(limit + 1 )]
    for p in primes:
        for n in range(p , limit + 1 , p ):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(f"""{solution() = }""")
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__A =logging.get_logger(__name__)
__A ={
'''shi-labs/nat-mini-in1k-224''': '''https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json''',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class _SCREAMING_SNAKE_CASE ( BackboneConfigMixin , PretrainedConfig ):
    model_type = 'nat'
    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }
    def __init__( self , patch_size=4 , num_channels=3 , embed_dim=64 , depths=[3, 4, 6, 5] , num_heads=[2, 4, 8, 16] , kernel_size=7 , mlp_ratio=3.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , initializer_range=0.0_2 , layer_norm_eps=1e-5 , layer_scale_init_value=0.0 , out_features=None , out_indices=None , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ['stem'] + [f'stage{idx}' for idx in range(1 , len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
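# --- Added usage sketch (illustrative; this module uses relative imports, so it
# is exercised through the transformers package rather than run as a script) ---
#
#     config = _SCREAMING_SNAKE_CASE()  # all defaults
#     config.num_layers   # 4, one per entry in depths=[3, 4, 6, 5]
#     config.hidden_size  # 512 == 64 * 2 ** 3, channel dim after the last stage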
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
    'good first issue',
    'feature request',
    'wip',
]
def main() -> None:
    """Close or flag stale issues on the huggingface/accelerate repository."""
    g = Github(os.environ['GITHUB_TOKEN'] )
    repo = g.get_repo('huggingface/accelerate' )
    open_issues = repo.get_issues(state='open' )
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='closed' )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main()
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training')
# TF training parameters
a_ = False
a_ = False
def train_command_factory( args : Namespace ):
    return TrainCommand(args )
class TrainCommand ( BaseTransformersCLICommand ):
    @staticmethod
    def register_subcommand( parser : ArgumentParser ):
        """simple docstring"""
        train_parser = parser.add_parser('train' , help='CLI tool to train a model on a task.' )
        train_parser.add_argument(
            '--train_data' , type=str , required=True , help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' , )
        train_parser.add_argument(
            '--column_label' , type=int , default=0 , help='Column of the dataset csv file with example labels.' )
        train_parser.add_argument(
            '--column_text' , type=int , default=1 , help='Column of the dataset csv file with example texts.' )
        train_parser.add_argument(
            '--column_id' , type=int , default=2 , help='Column of the dataset csv file with example ids.' )
        train_parser.add_argument(
            '--skip_first_row' , action='store_true' , help='Skip the first row of the csv file (headers).' )
        train_parser.add_argument('--validation_data' , type=str , default='' , help='path to validation dataset.' )
        train_parser.add_argument(
            '--validation_split' , type=float , default=0.1 , help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' , )
        train_parser.add_argument('--output' , type=str , default='./' , help='path to saved the trained model.' )
        train_parser.add_argument(
            '--task' , type=str , default='text_classification' , help='Task to train the model on.' )
        train_parser.add_argument(
            '--model' , type=str , default='bert-base-uncased' , help='Model\'s name or path to stored model.' )
        train_parser.add_argument('--train_batch_size' , type=int , default=3_2 , help='Batch size for training.' )
        train_parser.add_argument('--valid_batch_size' , type=int , default=6_4 , help='Batch size for validation.' )
        train_parser.add_argument('--learning_rate' , type=float , default=3e-5 , help='Learning rate.' )
        train_parser.add_argument('--adam_epsilon' , type=float , default=1e-08 , help='Epsilon for Adam optimizer.' )
        train_parser.set_defaults(func=train_command_factory )
    def __init__( self , args : Namespace ):
        """simple docstring"""
        self.logger = logging.get_logger('transformers-cli/training' )
        self.framework = 'tf' if is_tf_available() else 'torch'
        os.makedirs(args.output , exist_ok=True )
        self.output = args.output
        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id
        self.logger.info(f"Loading {args.task} pipeline for {args.model}" )
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model )
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError
        self.logger.info(f"Loading dataset from {args.train_data}" )
        self.train_dataset = Processor.create_from_csv(
            args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}" )
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon
    def run( self ):
        """simple docstring"""
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()
    def run_torch( self ):
        """simple docstring"""
        raise NotImplementedError
    def run_tf( self ):
        """simple docstring"""
        self.pipeline.fit(
            self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
        # Save trained pipeline
        self.pipeline.save_pretrained(self.output )
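# --- Added usage sketch (illustrative; flags and file names are made up). The
# subcommand registers under the transformers CLI, so a typical invocation is:
#
#     transformers-cli train --train_data train.csv --column_label 0 \
#         --column_text 1 --task text_classification --output ./trained_model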
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UMTaModelTester :
    def __init__( self , parent , vocab_size=9_9 , batch_size=1_3 , encoder_seq_length=7 , decoder_seq_length=9 , is_training=True , use_attention_mask=True , use_labels=False , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , d_ff=3_7 , relative_attention_num_buckets=8 , dropout_rate=0.1 , initializer_factor=0.002 , eos_token_id=1 , pad_token_id=0 , decoder_start_token_id=0 , scope=None , decoder_layers=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
    def get_large_model_config( self ):
        """simple docstring"""
        return TaConfig.from_pretrained('google/umt5-base' )
    def prepare_inputs_dict( self , config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
        """simple docstring"""
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id )
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id )
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=torch_device )
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=torch_device )
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers , config.num_attention_heads , device=torch_device )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1 )
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1 )
        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config , input_ids , decoder_input_ids )
        return config, input_dict
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config , inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def get_pipeline_config( self ):
"""simple docstring"""
return TaConfig(
vocab_size=1_6_6 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
    def get_config( self ):
"""simple docstring"""
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
    def create_and_check_model( self , config , input_ids , decoder_input_ids , attention_mask , decoder_attention_mask , lm_labels , ):
        """simple docstring"""
        model = UMTaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids=input_ids , decoder_input_ids=decoder_input_ids , attention_mask=attention_mask , decoder_attention_mask=decoder_attention_mask , )
        result = model(input_ids=input_ids , decoder_input_ids=decoder_input_ids )
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
        self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past ) , config.num_layers )
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0] ) , 4 )
    def create_and_check_decoder_model_past( self , config , input_ids , decoder_input_ids , attention_mask , decoder_attention_mask , lm_labels , ):
        """simple docstring"""
        model = UMTaModel(config=config ).get_decoder().to(torch_device ).eval()
        # first forward pass
        outputs = model(input_ids , use_cache=True )
        outputs_use_cache_conf = model(input_ids )
        outputs_no_past = model(input_ids , use_cache=False )
        self.parent.assertTrue(len(outputs ) == len(outputs_use_cache_conf ) )
        self.parent.assertTrue(len(outputs ) == len(outputs_no_past ) + 1 )
        output , past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1) , config.vocab_size )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        output_from_no_past = model(next_input_ids )['last_hidden_state']
        output_from_past = model(next_tokens , past_key_values=past_key_values )['last_hidden_state']
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1e-3 ) )
    def create_and_check_model_fpaa_forward( self , config , input_dict , ):
        """simple docstring"""
        model = UMTaModel(config=config ).to(torch_device ).half().eval()
        output = model(**input_dict )['last_hidden_state']
        self.parent.assertFalse(torch.isnan(output ).any().item() )
@require_torch
class UMTaModelTest ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'conversational': UMTaForConditionalGeneration,
            'feature-extraction': UMTaModel,
            'summarization': UMTaForConditionalGeneration,
            'text2text-generation': UMTaForConditionalGeneration,
            'translation': UMTaForConditionalGeneration,
            'question-answering': UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]
    def setUp( self ):
        """simple docstring"""
        self.model_tester = UMTaModelTester(self )
    @unittest.skip('Test has a segmentation fault on torch 1.8.0' )
    def test_export_to_onnx( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMTaModel(config_and_inputs[0] ).to(torch_device )
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f"{tmpdirname}/t5_test.onnx" , export_params=True , opset_version=9 , input_names=['input_ids', 'decoder_input_ids'] , )
    @unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision' )
    def test_model_fpaa_forward( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fpaa_forward(*config_and_inputs )
    def test_generate_with_head_masking( self ):
        """simple docstring"""
        attention_names = ['encoder_attentions', 'decoder_attentions', 'cross_attentions']
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMTaForConditionalGeneration(config ).eval()
        model.to(torch_device )
        head_masking = {
            'head_mask': torch.zeros(config.num_layers , config.num_heads , device=torch_device ),
            'decoder_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=torch_device ),
            'cross_attn_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=torch_device ),
        }
        for attn_name, (name, mask) in zip(attention_names , head_masking.items() ):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks['decoder_head_mask'] = torch.ones(
                    config.num_decoder_layers , config.num_heads , device=torch_device )
            out = model.generate(
                config_and_inputs[1]['input_ids'] , num_beams=1 , max_length=3 , output_attentions=True , return_dict_in_generate=True , **head_masks , )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip('Does not work on the tiny model as we keep hitting edge cases.' )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class UMTaModelIntegrationTests ( unittest.TestCase ):
@slow
@unittest.skip(
'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' )
    def test_small_integration_test( self ):
        """simple docstring"""
        model = UMTaForConditionalGeneration.from_pretrained('google/umt5-small' , return_dict=True ).to(torch_device )
        tokenizer = AutoTokenizer.from_pretrained('google/umt5-small' , use_fast=False , legacy=False )
        input_text = [
'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
'No se como puedo <extra_id_0>.',
'This is the reason why we <extra_id_0> them.',
'The <extra_id_0> walks in <extra_id_1>, seats',
'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
]
        input_ids = tokenizer(input_text , return_tensors='pt' , padding=True ).input_ids
# fmt: off
        EXPECTED_IDS = torch.tensor(
[
[ 3_8_5_3_0, 2_1_0_7_0_3, 2_5_6_2_9_9, 1_4_1_0, 2_5_6_2_9_8, 2_7_4, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 8_2_6, 3_2_1, 6_7_1, 2_5_9_2_2, 2_5_6_2_9_9, 2_7_4, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1_4_6_0, 3_3_9, 3_1_2, 1_9_0_1_4, 1_0_6_2_0, 7_5_8, 2_5_6_2_9_9, 2_3_5_5,2_7_4, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 5_1_7, 2_5_6_2_9_9, 1_4_8_6_9, 2_8_1, 3_0_1, 2_5_6_2_9_8, 2_7_5, 1_1_9_9_8_3,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 3_2_0, 2_5_6_2_9_9, 1_4_8_6_9, 2_8_1, 2_2_3_4, 2_8_9, 2_2_7_5, 3_3_3,6_1_3_9_1, 2_8_9, 2_5_6_2_9_8, 5_4_3, 2_5_6_2_9_7, 1_6_8_7_1_4, 3_2_9, 2_5_6_2_9_6,2_7_4, 1],
] )
# fmt: on
        torch.testing.assert_allclose(input_ids , EXPECTED_IDS )
        generated_ids = model.generate(input_ids.to(torch_device ) )
        EXPECTED_FILLING = [
'<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
'<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
]
        filling = tokenizer.batch_decode(generated_ids )
        self.assertEqual(filling , EXPECTED_FILLING )
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_blenderbot': [
        'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'BlenderbotConfig',
        'BlenderbotOnnxConfig',
    ],
    'tokenization_blenderbot': ['BlenderbotTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_blenderbot_fast'] = ['BlenderbotTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_blenderbot'] = [
        'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BlenderbotForCausalLM',
        'BlenderbotForConditionalGeneration',
        'BlenderbotModel',
        'BlenderbotPreTrainedModel',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_blenderbot'] = [
        'TFBlenderbotForConditionalGeneration',
        'TFBlenderbotModel',
        'TFBlenderbotPreTrainedModel',
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_blenderbot'] = [
        'FlaxBlenderbotForConditionalGeneration',
        'FlaxBlenderbotModel',
        'FlaxBlenderbotPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xglm'''] = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xglm_fast'''] = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_xglm'''] = [
        '''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''XGLMForCausalLM''',
        '''XGLMModel''',
        '''XGLMPreTrainedModel''',
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_xglm'''] = [
        '''FlaxXGLMForCausalLM''',
        '''FlaxXGLMModel''',
        '''FlaxXGLMPreTrainedModel''',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_xglm'''] = [
        '''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFXGLMForCausalLM''',
        '''TFXGLMModel''',
        '''TFXGLMPreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
__A =_LazyModule(__name__, globals()['''__file__'''], _import_structure)
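# A self-contained sketch of the optional-dependency guard used above, assuming
# only the standard library; importlib.util.find_spec stands in for the
# is_*_available() helpers from ...utils, whose internals are not shown here.
import importlib.util


class _OptionalDependencyNotAvailable(BaseException):
    """Raised when an optional backend (e.g. torch) is not installed."""


def _backend_available(name: str) -> bool:
    # Spec lookup only: checks installability without importing the heavy package.
    return importlib.util.find_spec(name) is not None


try:
    if not _backend_available("torch"):
        raise _OptionalDependencyNotAvailable()
except _OptionalDependencyNotAvailable:
    pass  # torch-backed names are simply left out of the import structure
else:
    pass  # torch-backed names would be appended to _import_structure here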
| 19
| 0
|
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
UpperCAmelCase_ : str = logging.getLogger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE__ :
snake_case__ : str = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
snake_case__ : Optional[str] = field(
default=lowercase__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
snake_case__ : Optional[str] = field(
default='''NER''' , metadata={'''help''': '''Task type to fine tune in training (e.g. NER, POS, etc)'''} )
snake_case__ : Optional[str] = field(
default=lowercase__ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
snake_case__ : bool = field(default=lowercase__ , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
snake_case__ : Optional[str] = field(
default=lowercase__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class SCREAMING_SNAKE_CASE__ :
snake_case__ : str = field(
metadata={'''help''': '''The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'''} )
snake_case__ : Optional[str] = field(
default=lowercase__ , metadata={'''help''': '''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'''} , )
snake_case__ : int = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
snake_case__ : bool = field(
default=lowercase__ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def SCREAMING_SNAKE_CASE_ ( ) -> Any:
"""simple docstring"""
a_ : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
a_ , a_ , a_ : int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
a_ , a_ , a_ : Tuple = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
' --overwrite_output_dir to overcome.' )
a_ : str = import_module('tasks' )
try:
a_ : Dict = getattr(__A , model_args.task_type )
a_ : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , __A )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
a_ : List[Any] = token_classification_task.get_labels(data_args.labels )
a_ : Dict[int, str] = dict(enumerate(__A ) )
a_ : List[Any] = len(__A )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
a_ : Any = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__A , idalabel=__A , labelaid={label: i for i, label in enumerate(__A )} , cache_dir=model_args.cache_dir , )
a_ : int = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
a_ : Tuple = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__A , cache_dir=model_args.cache_dir , )
# Get datasets
a_ : Any = (
TokenClassificationDataset(
token_classification_task=__A , data_dir=data_args.data_dir , tokenizer=__A , labels=__A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
a_ : Optional[Any] = (
TokenClassificationDataset(
token_classification_task=__A , data_dir=data_args.data_dir , tokenizer=__A , labels=__A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(__A : np.ndarray , __A : np.ndarray ) -> Tuple[List[int], List[int]]:
a_ : Union[str, Any] = np.argmax(__A , axis=2 )
a_ , a_ : Dict = preds.shape
a_ : Optional[int] = [[] for _ in range(__A )]
a_ : int = [[] for _ in range(__A )]
for i in range(__A ):
for j in range(__A ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
def compute_metrics(__A : EvalPrediction ) -> Dict:
a_ , a_ : Tuple = align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(__A , __A ),
"precision": precision_score(__A , __A ),
"recall": recall_score(__A , __A ),
"f1": fa_score(__A , __A ),
}
# Data collator
a_ : Union[str, Any] = DataCollatorWithPadding(__A , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
a_ : Optional[Any] = Trainer(
model=__A , args=__A , train_dataset=__A , eval_dataset=__A , compute_metrics=__A , data_collator=__A , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
a_ : List[str] = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
a_ : str = trainer.evaluate()
a_ : str = os.path.join(training_args.output_dir , 'eval_results.txt' )
if trainer.is_world_process_zero():
with open(__A , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(' %s = %s' , __A , __A )
writer.write('%s = %s\n' % (key, value) )
results.update(__A )
# Predict
if training_args.do_predict:
a_ : Tuple = TokenClassificationDataset(
token_classification_task=__A , data_dir=data_args.data_dir , tokenizer=__A , labels=__A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
a_ , a_ , a_ : Any = trainer.predict(__A )
a_ , a_ : Optional[Any] = align_predictions(__A , __A )
a_ : Dict = os.path.join(training_args.output_dir , 'test_results.txt' )
if trainer.is_world_process_zero():
with open(__A , 'w' ) as writer:
for key, value in metrics.items():
logger.info(' %s = %s' , __A , __A )
writer.write('%s = %s\n' % (key, value) )
# Save predictions
a_ : List[Any] = os.path.join(training_args.output_dir , 'test_predictions.txt' )
if trainer.is_world_process_zero():
with open(__A , 'w' ) as writer:
with open(os.path.join(data_args.data_dir , 'test.txt' ) , 'r' ) as f:
token_classification_task.write_predictions_to_file(__A , __A , __A )
return results
def SCREAMING_SNAKE_CASE_ ( __A : List[str] ) -> Optional[int]:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
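# A small worked example of the align_predictions logic above, on toy shapes:
# positions whose label id equals nn.CrossEntropyLoss().ignore_index (-100) are
# dropped before the seqeval metrics see the sequences. The label map is made up.
import numpy as np

_toy_label_map = {0: "O", 1: "B-PER"}
_toy_logits = np.array([[[0.9, 0.1], [0.2, 0.8], [0.5, 0.5]]])  # (batch=1, seq=3, labels=2)
_toy_label_ids = np.array([[0, 1, -100]])  # third position is padding
_toy_preds = np.argmax(_toy_logits, axis=2)
_aligned = [
    [_toy_label_map[int(_toy_preds[i, j])] for j in range(_toy_preds.shape[1]) if _toy_label_ids[i, j] != -100]
    for i in range(_toy_preds.shape[0])
]
assert _aligned == [["O", "B-PER"]]  # the padded token is filtered out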
| 120
|
import json
import sys
def SCREAMING_SNAKE_CASE_ ( __A : Optional[Any] , __A : List[str] ) -> Tuple:
"""simple docstring"""
with open(__A , encoding='utf-8' ) as f:
a_ : Union[str, Any] = json.load(__A )
a_ : Any = ['<details>', '<summary>Show updated benchmarks!</summary>', ' ']
for benchmark_name in sorted(__A ):
a_ : List[str] = results[benchmark_name]
a_ : int = benchmark_name.split('/' )[-1]
output_md.append(F"""### Benchmark: {benchmark_file_name}""" )
a_ : Any = '| metric |'
a_ : Optional[Any] = '|--------|'
a_ : int = '| new / old (diff) |'
for metric_name in sorted(__A ):
a_ : List[Any] = benchmark_res[metric_name]
a_ : int = metric_vals['new']
a_ : Union[str, Any] = metric_vals.get('old' , __A )
a_ : Optional[int] = metric_vals.get('diff' , __A )
a_ : str = F""" {new_val:f}""" if isinstance(__A , (int, float) ) else 'None'
if old_val is not None:
val_str += F""" / {old_val:f}""" if isinstance(__A , (int, float) ) else "None"
if dif_val is not None:
val_str += F""" ({dif_val:f})""" if isinstance(__A , (int, float) ) else "None"
title += " " + metric_name + " |"
lines += "---|"
value += val_str + " |"
output_md += [title, lines, value, " "]
output_md.append('</details>' )
with open(__A , 'w' , encoding='utf-8' ) as f:
f.writelines('\n'.join(__A ) )
if __name__ == "__main__":
UpperCAmelCase_ : int = sys.argv[1]
UpperCAmelCase_ : Any = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
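# A hedged sketch of the JSON shape format_json_to_md expects, inferred from the
# key accesses above ("new" per metric, with "old"/"diff" optional); the benchmark
# and metric names below are made up for illustration.
_example_results = {
    "benchmarks/text_classification": {
        "eval_accuracy": {"new": 0.91, "old": 0.89, "diff": 0.02},
        "eval_loss": {"new": 0.31},  # "old"/"diff" may be absent
    }
}
# Dumped to disk, this renders as one "### Benchmark: ..." table per entry.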
| 120
| 1
|
'''simple docstring'''
def a__ ( lowerCAmelCase__ ) -> bool:
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise ValueError('''check_bouncy() accepts only integer arguments''' )
UpperCAmelCase__ : str = str(__UpperCAmelCase )
UpperCAmelCase__ : Tuple = ''''''.join(sorted(__UpperCAmelCase ) )
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def a__ ( lowerCAmelCase__ = 99 ) -> int:
if not 0 < percent < 1_00:
raise ValueError('''solution() only accepts values from 0 to 100''' )
UpperCAmelCase__ : Union[str, Any] = 0
UpperCAmelCase__ : Tuple = 1
while True:
if check_bouncy(__UpperCAmelCase ):
bouncy_num += 1
if (bouncy_num / num) * 1_00 >= percent:
return num
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(9_9)}""")
| 181
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase=2_8_1_2_3 ) -> Any:
lowercase__: Optional[Any] = [1] * (limit + 1)
for i in range(2 , int(limit**0.5 ) + 1 ):
sum_divs[i * i] += i
for k in range(i + 1 , limit // i + 1 ):
sum_divs[k * i] += k + i
lowercase__: Union[str, Any] = set()
lowercase__: Optional[Any] = 0
for n in range(1 , limit + 1 ):
if sum_divs[n] > n:
abundants.add(__UpperCAmelCase )
if not any((n - a in abundants) for a in abundants ):
res += n
return res
if __name__ == "__main__":
print(solution())
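# Sanity check for the sieve above: 12 is the smallest abundant number
# (1 + 2 + 3 + 4 + 6 = 16 > 12), so 24 = 12 + 12 is the smallest sum of two
# abundant numbers and every positive integer below 24 contributes to the answer.
_n = 12
_proper_divisor_sum = sum(d for d in range(1, _n) if _n % d == 0)
assert _proper_divisor_sum == 16 and _proper_divisor_sum > _n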
| 177
| 0
|
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def A_ ( A__ ) -> int:
# vision encoder
if "img_encoder.pos_embed" in name:
a__ : List[str] = name.replace('img_encoder.pos_embed' , 'vision_model.embeddings.position_embeddings' )
if "img_encoder.patch_embed.proj" in name:
a__ : int = name.replace('img_encoder.patch_embed.proj' , 'vision_model.embeddings.patch_embeddings.projection' )
if "img_encoder.patch_embed.norm" in name:
a__ : Tuple = name.replace('img_encoder.patch_embed.norm' , 'vision_model.embeddings.layernorm' )
if "img_encoder.layers" in name:
a__ : Tuple = name.replace('img_encoder.layers' , 'vision_model.encoder.stages' )
if "blocks" in name and "res" not in name:
a__ : List[Any] = name.replace('blocks' , 'layers' )
if "attn" in name and "pre_assign" not in name:
a__ : Tuple = name.replace('attn' , 'self_attn' )
if "proj" in name and "self_attn" in name and "text" not in name:
a__ : int = name.replace('proj' , 'out_proj' )
if "pre_assign_attn.attn.proj" in name:
a__ : List[str] = name.replace('pre_assign_attn.attn.proj' , 'pre_assign_attn.attn.out_proj' )
if "norm1" in name:
a__ : int = name.replace('norm1' , 'layer_norm1' )
if "norm2" in name and "pre_assign" not in name:
a__ : List[str] = name.replace('norm2' , 'layer_norm2' )
if "img_encoder.norm" in name:
a__ : Tuple = name.replace('img_encoder.norm' , 'vision_model.layernorm' )
# text encoder
if "text_encoder.token_embedding" in name:
a__ : Tuple = name.replace('text_encoder.token_embedding' , 'text_model.embeddings.token_embedding' )
if "text_encoder.positional_embedding" in name:
a__ : Tuple = name.replace('text_encoder.positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
if "text_encoder.transformer.resblocks." in name:
a__ : List[str] = name.replace('text_encoder.transformer.resblocks.' , 'text_model.encoder.layers.' )
if "ln_1" in name:
a__ : Any = name.replace('ln_1' , 'layer_norm1' )
if "ln_2" in name:
a__ : Dict = name.replace('ln_2' , 'layer_norm2' )
if "c_fc" in name:
a__ : Dict = name.replace('c_fc' , 'fc1' )
if "c_proj" in name:
a__ : str = name.replace('c_proj' , 'fc2' )
if "text_encoder" in name:
a__ : Tuple = name.replace('text_encoder' , 'text_model' )
if "ln_final" in name:
a__ : Tuple = name.replace('ln_final' , 'final_layer_norm' )
# projection layers
if "img_projector.linear_hidden." in name:
a__ : List[Any] = name.replace('img_projector.linear_hidden.' , 'visual_projection.' )
if "img_projector.linear_out." in name:
a__ : Optional[int] = name.replace('img_projector.linear_out.' , 'visual_projection.3.' )
if "text_projector.linear_hidden" in name:
a__ : Dict = name.replace('text_projector.linear_hidden' , 'text_projection' )
if "text_projector.linear_out" in name:
a__ : Tuple = name.replace('text_projector.linear_out' , 'text_projection.3' )
return name
def A_ ( A__ , A__ ) -> str:
for key in orig_state_dict.copy().keys():
a__ : int = orig_state_dict.pop(A__ )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
a__ : int = key.split('.' )
a__ , a__ : List[str] = int(key_split[2] ), int(key_split[4] )
a__ : Optional[int] = config.vision_config.hidden_size
if "weight" in key:
a__ : Any = val[:dim, :]
a__ : Union[str, Any] = val[dim : dim * 2, :]
a__ : Optional[int] = val[-dim:, :]
else:
a__ : Union[str, Any] = val[:dim]
a__ : List[str] = val[dim : dim * 2]
a__ : Optional[Any] = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
a__ : Optional[int] = key.split('.' )
a__ : str = int(key_split[3] )
a__ : Union[str, Any] = config.text_config.hidden_size
if "weight" in key:
a__ : int = val[:dim, :]
a__ : List[Any] = val[
dim : dim * 2, :
]
a__ : Optional[int] = val[-dim:, :]
else:
a__ : List[Any] = val[:dim]
a__ : Dict = val[dim : dim * 2]
a__ : str = val[-dim:]
else:
a__ : Any = rename_key(A__ )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
a__ : List[str] = val.squeeze_()
else:
a__ : Any = val
return orig_state_dict
def A_ ( ) -> List[Any]:
a__ : List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
a__ : Optional[Any] = Image.open(requests.get(A__ , stream=A__ ).raw )
return im
@torch.no_grad()
def A_ ( A__ , A__ , A__="groupvit-gcc-yfcc" , A__=False ) -> List[Any]:
a__ : Optional[int] = GroupViTConfig()
a__ : List[str] = GroupViTModel(A__ ).eval()
a__ : Any = torch.load(A__ , map_location='cpu' )['model']
a__ : Union[str, Any] = convert_state_dict(A__ , A__ )
a__ , a__ : Optional[Any] = model.load_state_dict(A__ , strict=A__ )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(A__ ) == 0)
# verify result
a__ : Any = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32' )
a__ : List[Any] = prepare_img()
a__ : Any = processor(text=['a photo of a cat', 'a photo of a dog'] , images=A__ , padding=A__ , return_tensors='pt' )
with torch.no_grad():
a__ : int = model(**A__ )
if model_name == "groupvit-gcc-yfcc":
a__ : List[str] = torch.tensor([[13.35_23, 6.36_29]] )
elif model_name == "groupvit-gcc-redcaps":
a__ : Optional[Any] = torch.tensor([[16.18_73, 8.62_30]] )
else:
raise ValueError(F'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image , A__ , atol=1E-3 )
processor.save_pretrained(A__ )
model.save_pretrained(A__ )
print('Successfully saved processor and model to' , A__ )
if push_to_hub:
print('Pushing to the hub...' )
processor.push_to_hub(A__ , organization='nielsr' )
model.push_to_hub(A__ , organization='nielsr' )
if __name__ == "__main__":
lowercase : int = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""")
parser.add_argument(
"""--model_name""",
default="""groupvit-gccy-fcc""",
type=str,
help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""",
)
lowercase : Optional[int] = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
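# Replaying the renaming rules above on one hypothetical (non-qkv) checkpoint key;
# qkv keys take the separate splitting branch in the conversion function instead.
_key = "img_encoder.layers.0.blocks.1.norm1.weight"
_key = _key.replace("img_encoder.layers", "vision_model.encoder.stages")
_key = _key.replace("blocks", "layers")
_key = _key.replace("norm1", "layer_norm1")
assert _key == "vision_model.encoder.stages.0.layers.1.layer_norm1.weight"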
| 225
|
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class A__ :
"""simple docstring"""
def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=True , lowercase=99 , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=16 , lowercase=2 , lowercase=0.02 , lowercase=3 , lowercase=4 , lowercase=None , ) -> List[Any]:
'''simple docstring'''
a__ : Any = parent
a__ : int = batch_size
a__ : Dict = seq_length
a__ : Tuple = is_training
a__ : Any = use_input_mask
a__ : Optional[Any] = use_token_type_ids
a__ : Dict = use_labels
a__ : Optional[int] = vocab_size
a__ : List[Any] = hidden_size
a__ : int = num_hidden_layers
a__ : Optional[Any] = num_attention_heads
a__ : str = intermediate_size
a__ : Optional[int] = hidden_act
a__ : Dict = hidden_dropout_prob
a__ : Optional[int] = attention_probs_dropout_prob
a__ : Tuple = max_position_embeddings
a__ : Dict = type_vocab_size
a__ : Any = type_sequence_label_size
a__ : List[str] = initializer_range
a__ : List[str] = num_labels
a__ : Optional[Any] = num_choices
a__ : str = scope
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
a__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a__ : Tuple = None
if self.use_input_mask:
a__ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length])
a__ : Any = None
if self.use_token_type_ids:
a__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
a__ : str = None
a__ : List[Any] = None
a__ : List[str] = None
if self.use_labels:
a__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a__ : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a__ : str = ids_tensor([self.batch_size] , self.num_choices)
a__ : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowercase ( self) -> Optional[int]:
'''simple docstring'''
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase , initializer_range=self.initializer_range , )
def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> Optional[int]:
'''simple docstring'''
a__ : Union[str, Any] = NystromformerModel(config=lowercase)
model.to(lowercase)
model.eval()
a__ : List[Any] = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase)
a__ : int = model(lowercase , token_type_ids=lowercase)
a__ : Optional[Any] = model(lowercase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> str:
'''simple docstring'''
a__ : List[str] = NystromformerForMaskedLM(config=lowercase)
model.to(lowercase)
model.eval()
a__ : int = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> Union[str, Any]:
'''simple docstring'''
a__ : Any = NystromformerForQuestionAnswering(config=lowercase)
model.to(lowercase)
model.eval()
a__ : str = model(
lowercase , attention_mask=lowercase , token_type_ids=lowercase , start_positions=lowercase , end_positions=lowercase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> Optional[Any]:
'''simple docstring'''
a__ : int = self.num_labels
a__ : Optional[Any] = NystromformerForSequenceClassification(lowercase)
model.to(lowercase)
model.eval()
a__ : Tuple = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> List[str]:
'''simple docstring'''
a__ : Tuple = self.num_labels
a__ : int = NystromformerForTokenClassification(config=lowercase)
model.to(lowercase)
model.eval()
a__ : str = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> Any:
'''simple docstring'''
a__ : Optional[int] = self.num_choices
a__ : Tuple = NystromformerForMultipleChoice(config=lowercase)
model.to(lowercase)
model.eval()
a__ : Optional[Any] = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a__ : Tuple = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a__ : str = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a__ : Optional[int] = model(
lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
a__ : List[Any] = self.prepare_config_and_inputs()
(
(
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) ,
) : str = config_and_inputs
a__ : Any = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A__ ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
__A : Any = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
__A : str = (
{
'''feature-extraction''': NystromformerModel,
'''fill-mask''': NystromformerForMaskedLM,
'''question-answering''': NystromformerForQuestionAnswering,
'''text-classification''': NystromformerForSequenceClassification,
'''token-classification''': NystromformerForTokenClassification,
'''zero-shot''': NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
__A : Optional[Any] = False
__A : Tuple = False
def __lowercase ( self) -> Optional[Any]:
'''simple docstring'''
a__ : int = NystromformerModelTester(self)
a__ : Any = ConfigTester(self , config_class=lowercase , hidden_size=37)
def __lowercase ( self) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowercase ( self) -> int:
'''simple docstring'''
a__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase)
def __lowercase ( self) -> Tuple:
'''simple docstring'''
a__ : Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
a__ : Optional[Any] = type
self.model_tester.create_and_check_model(*lowercase)
def __lowercase ( self) -> Tuple:
'''simple docstring'''
a__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase)
def __lowercase ( self) -> Any:
'''simple docstring'''
a__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowercase)
def __lowercase ( self) -> Any:
'''simple docstring'''
a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase)
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowercase)
def __lowercase ( self) -> int:
'''simple docstring'''
a__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase)
@slow
def __lowercase ( self) -> Optional[int]:
'''simple docstring'''
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ : int = NystromformerModel.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
@require_torch
class A__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __lowercase ( self) -> Optional[Any]:
'''simple docstring'''
a__ : List[str] = NystromformerModel.from_pretrained('uw-madison/nystromformer-512')
a__ : Tuple = torch.tensor([[0, 1, 2, 3, 4, 5]])
with torch.no_grad():
a__ : List[Any] = model(lowercase)[0]
a__ : str = torch.Size((1, 6, 768))
self.assertEqual(output.shape , lowercase)
a__ : str = torch.tensor(
[[[-0.45_32, -0.09_36, 0.51_37], [-0.26_76, 0.06_28, 0.61_86], [-0.36_29, -0.17_26, 0.47_16]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase , atol=1e-4))
@slow
def __lowercase ( self) -> Optional[int]:
'''simple docstring'''
a__ : Any = 'the [MASK] of Belgium is Brussels'
a__ : List[str] = AutoTokenizer.from_pretrained('uw-madison/nystromformer-512')
a__ : Optional[int] = NystromformerForMaskedLM.from_pretrained('uw-madison/nystromformer-512')
a__ : List[Any] = tokenizer(lowercase , return_tensors='pt')
with torch.no_grad():
a__ : Union[str, Any] = model(encoding.input_ids).logits
a__ : str = token_logits[:, 2, :].argmax(-1)[0]
self.assertEqual(tokenizer.decode(lowercase) , 'capital')
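# A minimal standalone version of the fill-mask check above, kept as a function so
# nothing heavy runs at import time; assumes torch and hub access, like the @slow tests.
def _fill_mask_demo():
    import torch
    from transformers import AutoTokenizer, NystromformerForMaskedLM

    tok = AutoTokenizer.from_pretrained('uw-madison/nystromformer-512')
    mlm = NystromformerForMaskedLM.from_pretrained('uw-madison/nystromformer-512')
    enc = tok('the [MASK] of Belgium is Brussels', return_tensors='pt')
    with torch.no_grad():
        logits = mlm(**enc).logits
    return tok.decode(logits[:, 2, :].argmax(-1)[0])  # expected: 'capital'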
| 225
| 1
|
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
_snake_case : Optional[int] = logging.get_logger(__name__)
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase ):
def run_func(__lowerCamelCase ):
@wraps(__lowerCamelCase )
def run_in_eager_mode(*__lowerCamelCase , **__lowerCamelCase ):
return func(*__lowerCamelCase , **__lowerCamelCase )
@wraps(__lowerCamelCase )
@tf.function(experimental_compile=__lowerCamelCase )
def run_in_graph_mode(*__lowerCamelCase , **__lowerCamelCase ):
return func(*__lowerCamelCase , **__lowerCamelCase )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
"Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`." )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
__snake_case : Dict = random.Random()
__snake_case : List[Any] = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(__lowerCamelCase , shape=(batch_size, sequence_length) , dtype=tf.intaa )
class a (_lowerCAmelCase ):
"""simple docstring"""
__UpperCAmelCase : TensorFlowBenchmarkArguments
__UpperCAmelCase : PretrainedConfig
__UpperCAmelCase : str = "TensorFlow"
@property
def __snake_case ( self : Optional[int] ) -> Tuple:
return tf.__version__
def __snake_case ( self : str , lowerCamelCase : str , lowerCamelCase : int , lowerCamelCase : int ) -> float:
# initialize GPU on separate process
__snake_case : Optional[int] = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
__snake_case : List[Any] = self._prepare_inference_func(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return self._measure_speed(_inference )
def __snake_case ( self : List[str] , lowerCamelCase : str , lowerCamelCase : int , lowerCamelCase : int ) -> float:
__snake_case : Optional[Any] = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
__snake_case : str = self._prepare_train_func(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return self._measure_speed(_train )
def __snake_case ( self : Union[str, Any] , lowerCamelCase : str , lowerCamelCase : int , lowerCamelCase : int ) -> [Memory, Optional[MemorySummary]]:
# initialize GPU on separate process
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowerCamelCase )
__snake_case : int = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
__snake_case : str = self._prepare_inference_func(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return self._measure_memory(_inference )
def __snake_case ( self : List[str] , lowerCamelCase : str , lowerCamelCase : int , lowerCamelCase : int ) -> [Memory, Optional[MemorySummary]]:
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowerCamelCase )
__snake_case : Union[str, Any] = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
__snake_case : Optional[Any] = self._prepare_train_func(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return self._measure_memory(_train )
def __snake_case ( self : List[str] , lowerCamelCase : str , lowerCamelCase : int , lowerCamelCase : int ) -> Callable[[], None]:
__snake_case : List[str] = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
__snake_case : Tuple = (
hasattr(lowerCamelCase , "architectures" )
and isinstance(config.architectures , lowerCamelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
__snake_case : Optional[Any] = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
__snake_case : str = __import__("transformers" , fromlist=[model_class] )
__snake_case : Dict = getattr(lowerCamelCase , lowerCamelCase )
__snake_case : str = model_cls(lowerCamelCase )
except ImportError:
raise ImportError(
F'{model_class} does not exist. If you just want to test the pretrained model, you might want to'
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
__snake_case : Dict = TF_MODEL_MAPPING[config.__class__](lowerCamelCase )
# encoder-decoder has vocab size saved differently
__snake_case : List[Any] = config.vocab_size if hasattr(lowerCamelCase , "vocab_size" ) else config.encoder.vocab_size
__snake_case : str = random_input_ids(lowerCamelCase , lowerCamelCase , lowerCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(lowerCamelCase , decoder_input_ids=lowerCamelCase , training=lowerCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(lowerCamelCase , training=lowerCamelCase )
__snake_case : int = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def __snake_case ( self : Optional[Any] , lowerCamelCase : str , lowerCamelCase : int , lowerCamelCase : int ) -> Callable[[], None]:
__snake_case : str = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`." )
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
__snake_case : int = (
hasattr(lowerCamelCase , "architectures" )
and isinstance(config.architectures , lowerCamelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
__snake_case : List[Any] = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
__snake_case : int = __import__("transformers" , fromlist=[model_class] )
__snake_case : List[Any] = getattr(lowerCamelCase , lowerCamelCase )
__snake_case : List[str] = model_cls(lowerCamelCase )
except ImportError:
raise ImportError(
F'{model_class} does not exist. If you just want to test the pretrained model, you might want to'
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
__snake_case : List[Any] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](lowerCamelCase )
# encoder-decoder has vocab size saved differently
__snake_case : Optional[Any] = config.vocab_size if hasattr(lowerCamelCase , "vocab_size" ) else config.encoder.vocab_size
__snake_case : int = random_input_ids(lowerCamelCase , lowerCamelCase , lowerCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
__snake_case : Optional[Any] = model(lowerCamelCase , decoder_input_ids=lowerCamelCase , labels=lowerCamelCase , training=lowerCamelCase )[0]
__snake_case : Tuple = tf.gradients(lowerCamelCase , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
__snake_case : Union[str, Any] = model(lowerCamelCase , labels=lowerCamelCase , training=lowerCamelCase )[0]
__snake_case : Optional[int] = tf.gradients(lowerCamelCase , model.trainable_variables )
return gradients
__snake_case : str = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def __snake_case ( self : Optional[int] , lowerCamelCase : int ) -> float:
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run additional 10 times to stabilize compilation for tpu
logger.info("Do inference on TPU. Running model 5 times to stabilize compilation" )
timeit.repeat(lowerCamelCase , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
__snake_case : Dict = timeit.repeat(
lowerCamelCase , repeat=self.args.repeat , number=10 , )
return min(lowerCamelCase ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(F'Doesn\'t fit on GPU. {e}' )
def __snake_case ( self : Optional[int] , lowerCamelCase : Callable[[], None] ) -> [Memory, MemorySummary]:
logger.info(
"Note that TensorFlow allocates more memory than "
"it might need to speed up computation. "
"The memory reported here corresponds to the memory "
"reported by `nvidia-smi`, which can vary depending "
"on total available memory on the GPU that is used." )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
" consumption line by line." )
__snake_case : List[str] = start_memory_tracing("transformers" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
" with `args.memory=False`" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"py3nvml not installed, we won't log GPU memory usage. "
"Install py3nvml (pip install py3nvml) to log information about GPU." )
__snake_case : str = "N/A"
else:
logger.info(
"Measuring total GPU usage on GPU device. Make sure to not have additional processes"
" running on the same GPU." )
# init nvml
nvml.nvmlInit()
func()
__snake_case : Tuple = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
__snake_case : Optional[Any] = nvml.nvmlDeviceGetMemoryInfo(lowerCamelCase )
__snake_case : Dict = meminfo.used
__snake_case : Union[str, Any] = Memory(lowerCamelCase )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
" TensorFlow." )
__snake_case : Any = None
else:
__snake_case : Optional[int] = measure_peak_memory_cpu(lowerCamelCase )
__snake_case : int = Memory(lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else memory_bytes
if self.args.trace_memory_line_by_line:
__snake_case : Optional[int] = stop_memory_tracing(lowerCamelCase )
if memory is None:
__snake_case : Dict = summary.total
else:
__snake_case : str = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(F'Doesn\'t fit on GPU. {e}' )
return "N/A", None
| 123
|
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
_snake_case : Dict = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
_snake_case : List[str] = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}
_snake_case : List[str] = "zero2"
_snake_case : Any = "zero3"
_snake_case : Dict = [ZEROa, ZEROa]
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
# customize the test name generator function as we want both params to appear in the sub-test
# name, as by default it shows only the first param
    __snake_case : Optional[Any] = parameterized.to_safe_name("_".join(str(x ) for x in param.args ) )
return F'{func.__name__}_{param_based_name}'
# Cartesian-product of zero stages with models to test
_snake_case : Union[str, Any] = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class a (_lowerCAmelCase ):
"""simple docstring"""
@parameterized.expand(lowerCamelCase , name_func=lowerCamelCase )
def __snake_case ( self : Any , lowerCamelCase : List[Any] , lowerCamelCase : Dict ) -> Union[str, Any]:
self.run_and_check(
stage=lowerCamelCase , model=lowerCamelCase , distributed=lowerCamelCase , fpaa=lowerCamelCase , )
@require_torch_multi_gpu
@parameterized.expand(lowerCamelCase , name_func=lowerCamelCase )
def __snake_case ( self : Optional[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : List[Any] ) -> int:
self.run_and_check(
stage=lowerCamelCase , model=lowerCamelCase , distributed=lowerCamelCase , fpaa=lowerCamelCase , )
@parameterized.expand(lowerCamelCase , name_func=lowerCamelCase )
def __snake_case ( self : List[Any] , lowerCamelCase : Any , lowerCamelCase : int ) -> Dict:
self.run_and_check(
stage=lowerCamelCase , model=lowerCamelCase , distributed=lowerCamelCase , fpaa=lowerCamelCase , )
@require_torch_multi_gpu
@parameterized.expand(lowerCamelCase , name_func=lowerCamelCase )
def __snake_case ( self : str , lowerCamelCase : str , lowerCamelCase : Any ) -> str:
self.run_and_check(
stage=lowerCamelCase , model=lowerCamelCase , distributed=lowerCamelCase , fpaa=lowerCamelCase , )
def __snake_case ( self : str , lowerCamelCase : List[Any] ) -> Union[str, Any]:
# XXX: run_asr is premature and doesn't save any results
# so all we check for now is that the process didn't fail
pass
def __snake_case ( self : List[Any] , lowerCamelCase : str , lowerCamelCase : str , lowerCamelCase : int = 10 , lowerCamelCase : bool = True , lowerCamelCase : bool = True , lowerCamelCase : bool = True , ) -> Tuple:
__snake_case : Any = models[model]
__snake_case : Tuple = self.run_trainer(
stage=lowerCamelCase , model_name=lowerCamelCase , eval_steps=lowerCamelCase , num_train_epochs=1 , distributed=lowerCamelCase , fpaa=lowerCamelCase , )
self.do_checks(lowerCamelCase )
return output_dir
def __snake_case ( self : Optional[Any] , lowerCamelCase : str , lowerCamelCase : str , lowerCamelCase : int = 10 , lowerCamelCase : int = 1 , lowerCamelCase : bool = True , lowerCamelCase : bool = True , ) -> Tuple:
__snake_case : Optional[int] = self.get_auto_remove_tmp_dir("./xxx" , after=lowerCamelCase )
__snake_case : Optional[int] = F'\n --model_name_or_path {model_name}\n --dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(lowerCamelCase )}\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n --report_to none\n '.split()
if fpaa:
args.extend(["--fp16"] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
__snake_case : Optional[int] = F'--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'.split()
__snake_case : Dict = [F'{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py']
__snake_case : Any = self.get_launcher(lowerCamelCase )
__snake_case : Optional[Any] = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(lowerCamelCase , env=self.get_env() )
return output_dir
def __snake_case ( self : str , lowerCamelCase : str=False ) -> Any:
# 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
# - it won't be able to handle that
# 2. for now testing with just 2 gpus max (since some quality tests may give different
# results with mode gpus because we use very little data)
__snake_case : Dict = min(2 , get_gpu_count() ) if distributed else 1
return F'deepspeed --num_nodes 1 --num_gpus {num_gpus}'.split()
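# Worked example of the name generator above, assuming the `parameterized`
# package: a sub-test parameterized with ("zero2", "base") gets the suffix
# "zero2_base", so both the ZeRO stage and the model key show up in its name.
from parameterized import parameterized as _parameterized

assert _parameterized.to_safe_name("_".join(["zero2", "base"])) == "zero2_base"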
| 123
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__A = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
'UniSpeechForCTC',
'UniSpeechForPreTraining',
'UniSpeechForSequenceClassification',
'UniSpeechModel',
'UniSpeechPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 75
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__A = R'\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `" / "`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `" // "`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `"wiki_dpr"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `"train"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `"compressed"`)\n The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and\n `"compressed"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a "dummy" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n'
@add_start_docstrings(snake_case )
class SCREAMING_SNAKE_CASE ( snake_case ):
"""simple docstring"""
A_ = "rag"
A_ = True
def __init__( self: Optional[int] , __A: Optional[int]=None , __A: Dict=True , __A: Any=None , __A: Dict=None , __A: Optional[int]=None , __A: Optional[Any]=None , __A: Optional[Any]=None , __A: Optional[Any]=" / " , __A: int=" // " , __A: List[Any]=5 , __A: Dict=3_00 , __A: int=7_68 , __A: Tuple=8 , __A: List[Any]="wiki_dpr" , __A: List[str]="train" , __A: Optional[Any]="compressed" , __A: Optional[int]=None , __A: Union[str, Any]=None , __A: Dict=False , __A: Tuple=False , __A: Optional[int]=0.0 , __A: Optional[int]=True , __A: int=False , __A: int=False , __A: Optional[Any]=False , __A: Optional[int]=True , __A: Optional[int]=None , **__A: Optional[Any] , ) -> Union[str, Any]:
super().__init__(
bos_token_id=__A , pad_token_id=__A , eos_token_id=__A , decoder_start_token_id=__A , forced_eos_token_id=__A , is_encoder_decoder=__A , prefix=__A , vocab_size=__A , **__A , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
_A = kwargs.pop('''question_encoder''' )
_A = question_encoder_config.pop('''model_type''' )
_A = kwargs.pop('''generator''' )
_A = decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
_A = AutoConfig.for_model(__A , **__A )
_A = AutoConfig.for_model(__A , **__A )
_A = reduce_loss
_A = label_smoothing
_A = exclude_bos_score
_A = do_marginalize
_A = title_sep
_A = doc_sep
_A = n_docs
_A = max_combined_length
_A = dataset
_A = dataset_split
_A = index_name
_A = retrieval_vector_size
_A = retrieval_batch_size
_A = passages_path
_A = index_path
_A = use_dummy_dataset
_A = output_retrieved
_A = do_deduplication
_A = use_cache
if self.forced_eos_token_id is None:
_A = getattr(self.generator , '''forced_eos_token_id''' , __A )
@classmethod
def __A ( cls: List[Any] , __A: PretrainedConfig , __A: PretrainedConfig , **__A: Optional[int] ) -> PretrainedConfig:
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **__A )
def __A ( self: Optional[Any] ) -> Dict:
_A = copy.deepcopy(self.__dict__ )
_A = self.question_encoder.to_dict()
_A = self.generator.to_dict()
_A = self.__class__.model_type
return output
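# A hedged usage sketch for the composite config above, wrapped in a function so
# nothing runs at import time; in the released library the classmethod is exposed
# as `from_question_encoder_generator_configs`.
def _rag_config_demo():
    from transformers import BartConfig, DPRConfig, RagConfig

    rag_cfg = RagConfig.from_question_encoder_generator_configs(
        DPRConfig(), BartConfig(), n_docs=5
    )
    return rag_cfg.generator.model_type  # "bart"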
| 75
| 1
|
import csv
import tweepy
# Twitter API credentials
_UpperCAmelCase : Tuple = """"""
_UpperCAmelCase : Union[str, Any] = """"""
_UpperCAmelCase : int = """"""
_UpperCAmelCase : Optional[Any] = """"""
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> None:
# authorize twitter, initialize tweepy
lowerCamelCase__ : List[Any] = tweepy.OAuthHandler(_UpperCAmelCase , _UpperCAmelCase )
auth.set_access_token(_UpperCAmelCase , _UpperCAmelCase )
lowerCamelCase__ : List[Any] = tweepy.API(_UpperCAmelCase )
# initialize a list to hold all the tweepy Tweets
lowerCamelCase__ : Dict = []
# make initial request for most recent tweets (200 is the maximum allowed count)
lowerCamelCase__ : Tuple = api.user_timeline(screen_name=_UpperCAmelCase , count=200 )
# save most recent tweets
alltweets.extend(_UpperCAmelCase )
# save the id of the oldest tweet less one
lowerCamelCase__ : int = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(_UpperCAmelCase ) > 0:
print(F"""getting tweets before {oldest}""" )
# all subsequent requests use the max_id param to prevent duplicates
lowerCamelCase__ : Optional[Any] = api.user_timeline(
screen_name=_UpperCAmelCase , count=200 , max_id=_UpperCAmelCase )
# save most recent tweets
alltweets.extend(_UpperCAmelCase )
# update the id of the oldest tweet less one
lowerCamelCase__ : List[Any] = alltweets[-1].id - 1
print(F"""...{len(_UpperCAmelCase )} tweets downloaded so far""" )
# transform the tweepy tweets into a 2D array that will populate the csv
lowerCamelCase__ : Optional[Any] = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(F"""new_{screen_name}_tweets.csv""" , 'w' ) as f:
lowerCamelCase__ : Dict = csv.writer(_UpperCAmelCase )
writer.writerow(['id', 'created_at', 'text'] )
writer.writerows(_UpperCAmelCase )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
| 50
|
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> str:
lowerCamelCase__ : Optional[int] = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'encoder.embed_positions._float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
state_dict.pop(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> List[Any]:
lowerCamelCase__ , lowerCamelCase__ : List[str] = emb.weight.shape
lowerCamelCase__ : Tuple = nn.Linear(_UpperCAmelCase , _UpperCAmelCase , bias=_UpperCAmelCase )
lowerCamelCase__ : Dict = emb.weight.data
return lin_layer
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> str:
lowerCamelCase__ : Tuple = torch.load(_UpperCAmelCase , map_location='cpu' )
lowerCamelCase__ : List[str] = mam_aaa['args'] or mam_aaa['cfg']['model']
lowerCamelCase__ : Optional[int] = mam_aaa['model']
remove_ignore_keys_(_UpperCAmelCase )
lowerCamelCase__ : str = state_dict['encoder.embed_tokens.weight'].shape[0]
lowerCamelCase__ : Union[str, Any] = MaMaaaConfig(
vocab_size=_UpperCAmelCase , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='relu' , )
lowerCamelCase__ : Optional[Any] = state_dict['decoder.embed_tokens.weight']
lowerCamelCase__ : Union[str, Any] = MaMaaaForConditionalGeneration(_UpperCAmelCase )
model.model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase )
lowerCamelCase__ : List[str] = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
_UpperCAmelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
_UpperCAmelCase : str = parser.parse_args()
    _UpperCAmelCase : Optional[Any] = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
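The make_linear_from_emb helper ties the output projection to the shared embedding matrix; the idea in a shape-consistent standalone sketch (sizes are arbitrary):
# Hedged sketch of embedding-to-linear weight tying.
import torch
from torch import nn

emb = nn.Embedding(10, 4)               # (vocab_size, d_model)
lm_head = nn.Linear(4, 10, bias=False)  # hidden states -> vocab logits
lm_head.weight.data = emb.weight.data   # share the same parameter data

hidden = torch.randn(2, 4)
assert lm_head(hidden).shape == (2, 10)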
| 50
| 1
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def A ( __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = SwinvaConfig()
UpperCAmelCase_ = swinva_name.split('''_''' )
UpperCAmelCase_ = name_split[1]
if "to" in name_split[3]:
UpperCAmelCase_ = int(name_split[3][-3:] )
else:
UpperCAmelCase_ = int(name_split[3] )
if "to" in name_split[2]:
UpperCAmelCase_ = int(name_split[2][-2:] )
else:
UpperCAmelCase_ = int(name_split[2][6:] )
if model_size == "tiny":
UpperCAmelCase_ = 96
UpperCAmelCase_ = (2, 2, 6, 2)
UpperCAmelCase_ = (3, 6, 12, 24)
elif model_size == "small":
UpperCAmelCase_ = 96
UpperCAmelCase_ = (2, 2, 18, 2)
UpperCAmelCase_ = (3, 6, 12, 24)
elif model_size == "base":
UpperCAmelCase_ = 128
UpperCAmelCase_ = (2, 2, 18, 2)
UpperCAmelCase_ = (4, 8, 16, 32)
else:
UpperCAmelCase_ = 192
UpperCAmelCase_ = (2, 2, 18, 2)
UpperCAmelCase_ = (6, 12, 24, 48)
if "to" in swinva_name:
UpperCAmelCase_ = (12, 12, 12, 6)
if ("22k" in swinva_name) and ("to" not in swinva_name):
UpperCAmelCase_ = 2_1841
UpperCAmelCase_ = '''huggingface/label-files'''
UpperCAmelCase_ = '''imagenet-22k-id2label.json'''
UpperCAmelCase_ = json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
UpperCAmelCase_ = {int(__UpperCAmelCase ): v for k, v in idalabel.items()}
UpperCAmelCase_ = idalabel
UpperCAmelCase_ = {v: k for k, v in idalabel.items()}
else:
UpperCAmelCase_ = 1000
UpperCAmelCase_ = '''huggingface/label-files'''
UpperCAmelCase_ = '''imagenet-1k-id2label.json'''
UpperCAmelCase_ = json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
UpperCAmelCase_ = {int(__UpperCAmelCase ): v for k, v in idalabel.items()}
UpperCAmelCase_ = idalabel
UpperCAmelCase_ = {v: k for k, v in idalabel.items()}
UpperCAmelCase_ = img_size
UpperCAmelCase_ = num_classes
UpperCAmelCase_ = embed_dim
UpperCAmelCase_ = depths
UpperCAmelCase_ = num_heads
UpperCAmelCase_ = window_size
return config
def A ( __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
if "patch_embed.proj" in name:
UpperCAmelCase_ = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
UpperCAmelCase_ = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if "layers" in name:
UpperCAmelCase_ = '''encoder.''' + name
if "attn.proj" in name:
UpperCAmelCase_ = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
UpperCAmelCase_ = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
UpperCAmelCase_ = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
UpperCAmelCase_ = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
UpperCAmelCase_ = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
UpperCAmelCase_ = name.replace('''mlp.fc2''' , '''output.dense''' )
if "q_bias" in name:
UpperCAmelCase_ = name.replace('''q_bias''' , '''query.bias''' )
if "k_bias" in name:
UpperCAmelCase_ = name.replace('''k_bias''' , '''key.bias''' )
if "v_bias" in name:
UpperCAmelCase_ = name.replace('''v_bias''' , '''value.bias''' )
if "cpb_mlp" in name:
UpperCAmelCase_ = name.replace('''cpb_mlp''' , '''continuous_position_bias_mlp''' )
if name == "norm.weight":
UpperCAmelCase_ = '''layernorm.weight'''
if name == "norm.bias":
UpperCAmelCase_ = '''layernorm.bias'''
if "head" in name:
UpperCAmelCase_ = name.replace('''head''' , '''classifier''' )
else:
UpperCAmelCase_ = '''swinv2.''' + name
return name
def A ( __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
UpperCAmelCase_ = orig_state_dict.pop(__UpperCAmelCase )
if "mask" in key:
continue
elif "qkv" in key:
UpperCAmelCase_ = key.split('''.''' )
UpperCAmelCase_ = int(key_split[1] )
UpperCAmelCase_ = int(key_split[3] )
UpperCAmelCase_ = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
UpperCAmelCase_ = val[:dim, :]
UpperCAmelCase_ = val[dim : dim * 2, :]
UpperCAmelCase_ = val[-dim:, :]
else:
UpperCAmelCase_ = val[:dim]
UpperCAmelCase_ = val[
dim : dim * 2
]
UpperCAmelCase_ = val[-dim:]
else:
UpperCAmelCase_ = val
return orig_state_dict
def A ( __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = timm.create_model(__UpperCAmelCase , pretrained=__UpperCAmelCase )
timm_model.eval()
UpperCAmelCase_ = get_swinva_config(__UpperCAmelCase )
UpperCAmelCase_ = SwinvaForImageClassification(__UpperCAmelCase )
model.eval()
UpperCAmelCase_ = convert_state_dict(timm_model.state_dict() , __UpperCAmelCase )
model.load_state_dict(__UpperCAmelCase )
UpperCAmelCase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
UpperCAmelCase_ = AutoImageProcessor.from_pretrained('''microsoft/{}'''.format(swinva_name.replace('''_''' , '''-''' ) ) )
UpperCAmelCase_ = Image.open(requests.get(__UpperCAmelCase , stream=__UpperCAmelCase ).raw )
UpperCAmelCase_ = image_processor(images=__UpperCAmelCase , return_tensors='''pt''' )
UpperCAmelCase_ = timm_model(inputs['''pixel_values'''] )
UpperCAmelCase_ = model(**__UpperCAmelCase ).logits
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 )
print(f"Saving model {swinva_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(__UpperCAmelCase )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(__UpperCAmelCase )
model.push_to_hub(
repo_path_or_name=Path(__UpperCAmelCase , __UpperCAmelCase ) , organization='''nandwalritik''' , commit_message='''Add model''' , )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swinv2_name",
default="swinv2_tiny_patch4_window8_256",
type=str,
help="Name of the Swinv2 timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
UpperCamelCase_ = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
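The qkv branch of convert_state_dict assumes timm's fused attention weight stacks q, k and v along the first dimension; a small sketch of the split (sizes are arbitrary):
# Hedged illustration of the fused-qkv split used above.
import torch

dim = 3
qkv_weight = torch.randn(3 * dim, dim)
q = qkv_weight[:dim, :]
k = qkv_weight[dim : dim * 2, :]
v = qkv_weight[-dim:, :]
assert q.shape == k.shape == v.shape == (dim, dim)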
| 344
|
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class a_ ( unittest.TestCase ):
def __init__( self :Tuple , _lowercase :List[Any] , _lowercase :bool = True , _lowercase :Dict[str, int] = None , _lowercase :int = 32 , _lowercase :bool = True , _lowercase :Union[int, float] = 1 / 255 , _lowercase :bool = True , _lowercase :bool = True , _lowercase :Optional[Union[float, List[float]]] = [0.48_145_466, 0.4_578_275, 0.40_821_073] , _lowercase :Optional[Union[float, List[float]]] = [0.26_862_954, 0.26_130_258, 0.27_577_711] , _lowercase :bool = True , _lowercase :List[Any]=7 , _lowercase :Dict=30 , _lowercase :Optional[int]=400 , _lowercase :Any=3 , ) -> Any:
UpperCAmelCase_ = parent
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size if size is not None else {'''shortest_edge''': 288}
UpperCAmelCase_ = size_divisor
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = do_center_crop
UpperCAmelCase_ = image_mean
UpperCAmelCase_ = image_std
UpperCAmelCase_ = do_pad
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = min_resolution
UpperCAmelCase_ = max_resolution
def __a ( self :str) -> Tuple:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def __a ( self :List[Any] , _lowercase :Tuple , _lowercase :List[str]=False) -> int:
if not batched:
UpperCAmelCase_ = self.size['''shortest_edge''']
UpperCAmelCase_ = image_inputs[0]
if isinstance(_lowercase , Image.Image):
UpperCAmelCase_ , UpperCAmelCase_ = image.size
else:
UpperCAmelCase_ , UpperCAmelCase_ = image.shape[1], image.shape[2]
UpperCAmelCase_ = size / min(_lowercase , _lowercase)
if h < w:
UpperCAmelCase_ , UpperCAmelCase_ = size, scale * w
else:
UpperCAmelCase_ , UpperCAmelCase_ = scale * h, size
UpperCAmelCase_ = int((1333 / 800) * size)
if max(_lowercase , _lowercase) > max_size:
UpperCAmelCase_ = max_size / max(_lowercase , _lowercase)
UpperCAmelCase_ = newh * scale
UpperCAmelCase_ = neww * scale
UpperCAmelCase_ , UpperCAmelCase_ = int(newh + 0.5), int(neww + 0.5)
UpperCAmelCase_ , UpperCAmelCase_ = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
UpperCAmelCase_ = []
for image in image_inputs:
UpperCAmelCase_ , UpperCAmelCase_ = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
UpperCAmelCase_ = max(_lowercase , key=lambda _lowercase: item[0])[0]
UpperCAmelCase_ = max(_lowercase , key=lambda _lowercase: item[1])[1]
return expected_height, expected_width
@require_torch
@require_vision
class a_ ( _snake_case , unittest.TestCase ):
UpperCamelCase__ : Tuple =BridgeTowerImageProcessor if is_vision_available() else None
def __a ( self :int) -> Dict:
UpperCAmelCase_ = BridgeTowerImageProcessingTester(self)
@property
def __a ( self :Dict) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self :Dict) -> Tuple:
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(_lowercase , '''image_mean'''))
self.assertTrue(hasattr(_lowercase , '''image_std'''))
self.assertTrue(hasattr(_lowercase , '''do_normalize'''))
self.assertTrue(hasattr(_lowercase , '''do_resize'''))
self.assertTrue(hasattr(_lowercase , '''size'''))
self.assertTrue(hasattr(_lowercase , '''size_divisor'''))
def __a ( self :Union[str, Any]) -> Tuple:
pass
def __a ( self :List[str]) -> Tuple:
# Initialize image processor
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase)
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image)
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(_lowercase , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __a ( self :Union[str, Any]) -> Optional[int]:
# Initialize image processor
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase)
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray)
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(_lowercase , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __a ( self :str) -> int:
# Initialize image processor
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase)
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor)
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(_lowercase , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
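get_expected_values above mirrors a shortest-edge resize with a (1333 / 800) * size cap and size-divisor rounding; the same arithmetic as a standalone sketch:
# Hedged re-statement of the single-image path of get_expected_values.
def expected_size(h: int, w: int, size: int = 288, size_divisor: int = 32) -> tuple:
    scale = size / min(h, w)                 # shorter edge is resized to `size`
    newh, neww = (size, scale * w) if h < w else (scale * h, size)
    max_size = int((1333 / 800) * size)      # cap on the longer edge
    if max(newh, neww) > max_size:
        s = max_size / max(newh, neww)
        newh, neww = newh * s, neww * s
    newh, neww = int(newh + 0.5), int(neww + 0.5)
    return newh // size_divisor * size_divisor, neww // size_divisor * size_divisor

assert expected_size(480, 640) == (288, 384)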
| 344
| 1
|
'''simple docstring'''
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
__A : str = get_tests_dir("fixtures/dummy-config.json")
class __snake_case ( unittest.TestCase):
"""simple docstring"""
def __lowercase ( self : Optional[Any] ) -> List[Any]:
lowerCAmelCase_ : Optional[int] = 0
def __lowercase ( self : Union[str, Any] ) -> int:
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("""transformers.models.auto""" ) )
def __lowercase ( self : List[Any] ) -> Optional[int]:
lowerCAmelCase_ : str = AutoConfig.from_pretrained("""bert-base-uncased""" )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
def __lowercase ( self : Union[str, Any] ) -> List[str]:
lowerCAmelCase_ : List[Any] = AutoConfig.from_pretrained(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
def __lowercase ( self : Optional[Any] ) -> List[str]:
lowerCAmelCase_ : List[str] = AutoConfig.from_pretrained(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
def __lowercase ( self : int ) -> Union[str, Any]:
lowerCAmelCase_ : List[Any] = AutoConfig.for_model("""roberta""" )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
def __lowercase ( self : List[Any] ) -> Tuple:
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
lowerCAmelCase_ : Dict = os.path.join(lowerCamelCase , """fake-roberta""" )
os.makedirs(lowerCamelCase , exist_ok=lowerCamelCase )
with open(os.path.join(lowerCamelCase , """config.json""" ) , """w""" ) as f:
f.write(json.dumps({} ) )
lowerCAmelCase_ : Any = AutoConfig.from_pretrained(lowerCamelCase )
self.assertEqual(type(lowerCamelCase ) , lowerCamelCase )
def __lowercase ( self : Any ) -> Optional[int]:
try:
AutoConfig.register("""custom""" , lowerCamelCase )
# Wrong model type will raise an error
with self.assertRaises(lowerCamelCase ):
AutoConfig.register("""model""" , lowerCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCamelCase ):
AutoConfig.register("""bert""" , lowerCamelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
lowerCAmelCase_ : int = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase )
lowerCAmelCase_ : Union[str, Any] = AutoConfig.from_pretrained(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def __lowercase ( self : Any ) -> int:
with self.assertRaisesRegex(
lowerCamelCase , """bert-base is not a local folder and is not a valid model identifier""" ):
lowerCAmelCase_ : List[str] = AutoConfig.from_pretrained("""bert-base""" )
def __lowercase ( self : Union[str, Any] ) -> str:
with self.assertRaisesRegex(
lowerCamelCase , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
lowerCAmelCase_ : Optional[int] = AutoConfig.from_pretrained(lowerCamelCase , revision="""aaaaaa""" )
def __lowercase ( self : Union[str, Any] ) -> Tuple:
with self.assertRaisesRegex(
lowerCamelCase , """hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.""" , ):
lowerCAmelCase_ : List[Any] = AutoConfig.from_pretrained("""hf-internal-testing/no-config-test-repo""" )
def __lowercase ( self : str ) -> Union[str, Any]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowerCamelCase ):
lowerCAmelCase_ : Optional[int] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCamelCase ):
lowerCAmelCase_ : Union[str, Any] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=lowerCamelCase )
lowerCAmelCase_ : List[Any] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=lowerCamelCase )
self.assertEqual(config.__class__.__name__ , """NewModelConfig""" )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase )
lowerCAmelCase_ : List[str] = AutoConfig.from_pretrained(lowerCamelCase , trust_remote_code=lowerCamelCase )
self.assertEqual(reloaded_config.__class__.__name__ , """NewModelConfig""" )
def __lowercase ( self : Any ) -> List[str]:
class __snake_case ( _SCREAMING_SNAKE_CASE):
"""simple docstring"""
lowercase = 'new-model'
try:
AutoConfig.register("""new-model""" , lowerCamelCase )
# If remote code is not set, the default is to use local
lowerCAmelCase_ : int = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" )
# If remote code is disabled, we load the local one.
lowerCAmelCase_ : Union[str, Any] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=lowerCamelCase )
self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" )
# If remote is enabled, we load from the Hub
lowerCAmelCase_ : Any = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=lowerCamelCase )
self.assertEqual(config.__class__.__name__ , """NewModelConfig""" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
| 120
|
'''simple docstring'''
def UpperCamelCase_ ( A__ : list[list[float]] ):
'''simple docstring'''
lowerCAmelCase_ : list[list[float]] = []
for data in source_data:
for i, el in enumerate(A__ ):
if len(A__ ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(A__ ) )
return data_lists
def UpperCamelCase_ ( A__ : list[list[float]] , A__ : list[int] ):
'''simple docstring'''
lowerCAmelCase_ : list[list[float]] = []
for dlist, weight in zip(A__ , A__ ):
lowerCAmelCase_ : Tuple = min(A__ )
lowerCAmelCase_ : str = max(A__ )
lowerCAmelCase_ : list[float] = []
# for weight 0 score is 1 - actual score
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
lowerCAmelCase_ : List[Any] = f'Invalid weight of {weight:f} provided'
raise ValueError(A__ )
score_lists.append(A__ )
return score_lists
def UpperCamelCase_ ( A__ : list[list[float]] ):
'''simple docstring'''
lowerCAmelCase_ : list[float] = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(A__ ):
lowerCAmelCase_ : List[Any] = final_scores[j] + ele
return final_scores
def UpperCamelCase_ ( A__ : list[list[float]] , A__ : list[int] ):
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = get_data(A__ )
lowerCAmelCase_ : Tuple = calculate_each_score(A__ , A__ )
lowerCAmelCase_ : Optional[int] = generate_final_scores(A__ )
# append scores to source data
for i, ele in enumerate(A__ ):
source_data[i].append(A__ )
return source_data
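A worked example of the scoring scheme above: weight 0 rewards low raw values, weight 1 rewards high ones, and the final score is the row-wise sum (the pipeline is restated inline because the original function names are not preserved in this dump):
# Hedged worked example of the min-max weighting.
def score(values, weight):
    lo, hi = min(values), max(values)
    span = (hi - lo) or 1  # guards the ZeroDivisionError branch above
    return [(v - lo) / span if weight == 1 else 1 - (v - lo) / span for v in values]

cols = [[20, 25, 30], [2, 4, 3], [2.0, 0.5, 3.5]]  # e.g. price, distance, rating
weights = [0, 0, 1]  # 0 = lower is better, 1 = higher is better
per_col = [score(c, w) for c, w in zip(cols, weights)]
finals = [sum(v) for v in zip(*per_col)]
print(finals)  # [2.5, 0.5, 1.5] -> the first option scores best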
| 120
| 1
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
"""simple docstring"""
_snake_case : Tuple = BlenderbotSmallTokenizer
_snake_case : Tuple = False
def snake_case__ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
super().setUp()
_UpperCamelCase = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__''']
_UpperCamelCase = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
_UpperCamelCase = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', '''''']
_UpperCamelCase = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''}
_UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(lowerCAmelCase__ ) )
def snake_case__ ( self : Optional[int] , **lowerCAmelCase__ : Union[str, Any] ) -> Dict:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def snake_case__ ( self : int , lowerCAmelCase__ : Tuple ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = '''adapt act apte'''
_UpperCamelCase = '''adapt act apte'''
return input_text, output_text
def snake_case__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_UpperCamelCase = '''adapt act apte'''
_UpperCamelCase = ['''adapt''', '''act''', '''ap@@''', '''te''']
_UpperCamelCase = tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
_UpperCamelCase = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , lowerCAmelCase__ )
def snake_case__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
_UpperCamelCase = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
assert tok('''sam''' ).input_ids == [1384]
_UpperCamelCase = '''I am a small frog.'''
_UpperCamelCase = tok([src_text] , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ )['''input_ids''']
_UpperCamelCase = tok.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ , clean_up_tokenization_spaces=lowerCAmelCase__ )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def snake_case__ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
_UpperCamelCase = '''I am a small frog .'''
_UpperCamelCase = '''.'''
_UpperCamelCase = tok(lowerCAmelCase__ )['''input_ids''']
_UpperCamelCase = tok(lowerCAmelCase__ )['''input_ids''']
assert encoded[-1] == encoded_dot[0]
| 287
|
'''simple docstring'''
import math
def a__ ( lowercase : float, lowercase : float ) -> float:
"""simple docstring"""
if initial_intensity < 0:
raise ValueError('''The value of intensity cannot be negative''' )
# handling of negative values of initial intensity
if angle < 0 or angle > 360:
raise ValueError('''In Malus Law, the angle is in the range 0-360 degrees''' )
# handling of values out of allowed range
return initial_intensity * (math.cos(math.radians(lowercase ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law')
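The doctests that the testmod call targets are not preserved in this dump; a quick numeric spot-check of the formula:
# Malus's law: I = I0 * cos^2(theta); at 60 degrees a 100-unit beam passes 25.
import math

assert math.isclose(100 * math.cos(math.radians(60)) ** 2, 25.0)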
| 287
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase__ : Union[str, Any] = {
'configuration_clip': [
'CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'CLIPConfig',
'CLIPOnnxConfig',
'CLIPTextConfig',
'CLIPVisionConfig',
],
'processing_clip': ['CLIPProcessor'],
'tokenization_clip': ['CLIPTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Union[str, Any] = ['CLIPTokenizerFast']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Optional[Any] = ['CLIPFeatureExtractor']
lowerCamelCase__ : int = ['CLIPImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : List[Any] = [
'CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'CLIPModel',
'CLIPPreTrainedModel',
'CLIPTextModel',
'CLIPTextModelWithProjection',
'CLIPVisionModel',
'CLIPVisionModelWithProjection',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : str = [
'TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCLIPModel',
'TFCLIPPreTrainedModel',
'TFCLIPTextModel',
'TFCLIPVisionModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : str = [
'FlaxCLIPModel',
'FlaxCLIPPreTrainedModel',
'FlaxCLIPTextModel',
'FlaxCLIPTextPreTrainedModel',
'FlaxCLIPVisionModel',
'FlaxCLIPVisionPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
lowerCamelCase__ : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
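The module above materializes its heavy imports only on attribute access via _LazyModule; a toy sketch of the same deferred-import idea (names hypothetical):
# Hedged toy version of lazy attribute resolution.
import importlib

class LazyAttr:
    def __init__(self, module_name: str, attr: str):
        self._module_name, self._attr = module_name, attr

    def load(self):
        # The module is only imported here, on first use.
        return getattr(importlib.import_module(self._module_name), self._attr)

loads = LazyAttr("json", "loads").load()
assert loads('{"a": 1}') == {"a": 1}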
| 225
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : List[str] = logging.get_logger(__name__)
lowerCamelCase__ : List[str] = {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "speech_to_text"
lowercase_ = ["past_key_values"]
lowercase_ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : Tuple , _lowerCAmelCase : List[Any]=10_000 , _lowerCAmelCase : List[Any]=12 , _lowerCAmelCase : Union[str, Any]=2_048 , _lowerCAmelCase : Optional[int]=4 , _lowerCAmelCase : Union[str, Any]=6 , _lowerCAmelCase : Optional[int]=2_048 , _lowerCAmelCase : Optional[Any]=4 , _lowerCAmelCase : Any=0.0 , _lowerCAmelCase : Optional[Any]=0.0 , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : List[Any]=True , _lowerCAmelCase : int="relu" , _lowerCAmelCase : Union[str, Any]=256 , _lowerCAmelCase : List[str]=0.1 , _lowerCAmelCase : str=0.0 , _lowerCAmelCase : Tuple=0.0 , _lowerCAmelCase : List[str]=0.02 , _lowerCAmelCase : List[str]=2 , _lowerCAmelCase : Tuple=True , _lowerCAmelCase : List[str]=1 , _lowerCAmelCase : str=0 , _lowerCAmelCase : str=2 , _lowerCAmelCase : Union[str, Any]=6_000 , _lowerCAmelCase : List[str]=1_024 , _lowerCAmelCase : str=2 , _lowerCAmelCase : Optional[Any]=(5, 5) , _lowerCAmelCase : str=1_024 , _lowerCAmelCase : str=80 , _lowerCAmelCase : Tuple=1 , **_lowerCAmelCase : Any , ):
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = d_model
SCREAMING_SNAKE_CASE_ = encoder_ffn_dim
SCREAMING_SNAKE_CASE_ = encoder_layers
SCREAMING_SNAKE_CASE_ = encoder_attention_heads
SCREAMING_SNAKE_CASE_ = decoder_ffn_dim
SCREAMING_SNAKE_CASE_ = decoder_layers
SCREAMING_SNAKE_CASE_ = decoder_attention_heads
SCREAMING_SNAKE_CASE_ = dropout
SCREAMING_SNAKE_CASE_ = attention_dropout
SCREAMING_SNAKE_CASE_ = activation_dropout
SCREAMING_SNAKE_CASE_ = activation_function
SCREAMING_SNAKE_CASE_ = init_std
SCREAMING_SNAKE_CASE_ = encoder_layerdrop
SCREAMING_SNAKE_CASE_ = decoder_layerdrop
SCREAMING_SNAKE_CASE_ = use_cache
SCREAMING_SNAKE_CASE_ = encoder_layers
SCREAMING_SNAKE_CASE_ = scale_embedding # scale factor will be sqrt(d_model) if True
SCREAMING_SNAKE_CASE_ = max_source_positions
SCREAMING_SNAKE_CASE_ = max_target_positions
SCREAMING_SNAKE_CASE_ = num_conv_layers
SCREAMING_SNAKE_CASE_ = list(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = conv_channels
SCREAMING_SNAKE_CASE_ = input_feat_per_channel
SCREAMING_SNAKE_CASE_ = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '
F"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, "
F"`config.num_conv_layers = {self.num_conv_layers}`." )
super().__init__(
pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , is_encoder_decoder=_lowerCAmelCase , decoder_start_token_id=_lowerCAmelCase , **_lowerCAmelCase , )
| 225
| 1
|
def A ( lowercase = 50 ) -> int:
'''simple docstring'''
UpperCamelCase = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(F'''{solution() = }''')
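The triple loop counts fillings of a row with red blocks of length at least three separated by at least one unit (Project Euler 114); a standalone restatement checked against the problem's example value:
# Hedged restatement of the recurrence above; PE 114 states a 7-unit row
# admits exactly 17 arrangements.
def count_fillings(length: int) -> int:
    ways = [1] * (length + 1)
    for row in range(3, length + 1):
        for block in range(3, row + 1):
            for start in range(row - block):
                ways[row] += ways[row - start - block - 1]
            ways[row] += 1  # the block placed flush against the right edge
    return ways[length]

assert count_fillings(7) == 17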
| 110
|
_UpperCAmelCase : str = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
_UpperCAmelCase : Any = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
_UpperCAmelCase : List[Any] = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
_UpperCAmelCase : Tuple = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
_UpperCAmelCase : Union[str, Any] = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
_UpperCAmelCase : List[str] = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
_UpperCAmelCase : Tuple = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
_UpperCAmelCase : Dict = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
| 110
| 1