| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 86–54.5k | int64 0–371 | stringlengths 87–49.2k | int64 0–349 | int64 0–1 |
"""simple docstring"""
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the decimal part of `number`, rounded to `digit_amount` digits when positive."""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)


if __name__ == "__main__":
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
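    # Hedged sanity check (editor's addition, not part of the original row):
    # the decimal part of 35.345 rounded to one digit is 0.3.
    print(decimal_isolate(35.345, 1) == round(0.345, 1))  # expected: True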
| 217
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"transformer.blocks.{i}.norm1.weight", f"vilt.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"transformer.blocks.{i}.norm1.bias", f"vilt.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"transformer.blocks.{i}.attn.proj.weight", f"vilt.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"transformer.blocks.{i}.attn.proj.bias", f"vilt.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"transformer.blocks.{i}.norm2.weight", f"vilt.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"transformer.blocks.{i}.norm2.bias", f"vilt.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"transformer.blocks.{i}.mlp.fc1.weight", f"vilt.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"transformer.blocks.{i}.mlp.fc1.bias", f"vilt.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.weight", f"vilt.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.bias", f"vilt.encoder.layer.{i}.output.dense.bias"))

    # embeddings
    rename_keys.extend(
        [
            # text embeddings
            ("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
            ("text_embeddings.position_embeddings.weight", "vilt.embeddings.text_embeddings.position_embeddings.weight"),
            ("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
            ("text_embeddings.token_type_embeddings.weight", "vilt.embeddings.text_embeddings.token_type_embeddings.weight"),
            ("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
            ("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
            # patch embeddings
            ("transformer.cls_token", "vilt.embeddings.cls_token"),
            ("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
            ("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
            ("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
            # token type embeddings
            ("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
        ]
    )
    # final layernorm + pooler
    rename_keys.extend(
        [
            ("transformer.norm.weight", "vilt.layernorm.weight"),
            ("transformer.norm.bias", "vilt.layernorm.bias"),
            ("pooler.dense.weight", "vilt.pooler.dense.weight"),
            ("pooler.dense.bias", "vilt.pooler.dense.bias"),
        ]
    )
    # classifier head(s)
    if vqa_model:
        # classification head
        rename_keys.extend(
            [
                ("vqa_classifier.0.weight", "classifier.0.weight"),
                ("vqa_classifier.0.bias", "classifier.0.bias"),
                ("vqa_classifier.1.weight", "classifier.1.weight"),
                ("vqa_classifier.1.bias", "classifier.1.bias"),
                ("vqa_classifier.3.weight", "classifier.3.weight"),
                ("vqa_classifier.3.bias", "classifier.3.bias"),
            ]
        )
    elif nlvr_model:
        # classification head
        rename_keys.extend(
            [
                ("nlvr2_classifier.0.weight", "classifier.0.weight"),
                ("nlvr2_classifier.0.bias", "classifier.0.bias"),
                ("nlvr2_classifier.1.weight", "classifier.1.weight"),
                ("nlvr2_classifier.1.bias", "classifier.1.bias"),
                ("nlvr2_classifier.3.weight", "classifier.3.weight"),
                ("nlvr2_classifier.3.bias", "classifier.3.bias"),
            ]
        )
    else:
        pass

    return rename_keys
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict;
        # the target key names below are reconstructed from the ViLT model layout
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[config.hidden_size : config.hidden_size * 2, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[config.hidden_size : config.hidden_size * 2]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):  # name reconstructed; the helper pops the original head weights
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    # define configuration and initialize HuggingFace model
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)

    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)

    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)

    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)

        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model and processor to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_A : str =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
_A : Tuple =parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 41
| 0
|
'''simple docstring'''
from torch import nn
class ClassificationHead(nn.Module):
    """Classification head: a single linear layer from hidden states to class logits."""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        # hidden_state = nn.functional.relu(self.mlp1(hidden_state))
        # hidden_state = self.mlp2(hidden_state)
        logits = self.mlp(hidden_state)
        return logits
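# Hedged usage sketch (editor's addition, not part of the original module):
# the head maps (batch, embed_size) activations to (batch, class_size) logits.
if __name__ == "__main__":
    import torch

    head = ClassificationHead(class_size=5, embed_size=768)
    assert head(torch.randn(2, 768)).shape == (2, 5)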
| 351
|
'''simple docstring'''
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """Interleave the characters of two strings, appending leftovers from the longer one."""
    first_str_length: int = len(first_str)
    second_str_length: int = len(second_str)
    abs_length: int = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list: list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)


if __name__ == "__main__":
    print(alternative_string_arrange("AB", "XYZ"), end=" ")
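    # Hedged check (editor's addition): "AB" and "XYZ" interleave to "AXBYZ".
    assert alternative_string_arrange("AB", "XYZ") == "AXBYZ"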
| 280
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swinv2-tiny-patch4-window8-256": (
        "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
    ),
}
class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
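# Hedged sanity check (editor's addition): with the defaults above, the derived
# channel dimension after the last stage is 96 * 2 ** (4 - 1) = 768.
if __name__ == "__main__":
    assert Swinv2Config().hidden_size == 768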
| 44
|
"""simple docstring"""
from __future__ import annotations
RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    """
    Least-significant-digit radix sort for non-negative integers, in place.

    >>> radix_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> radix_sort([1, 100, 10, 1000])
    [1, 10, 100, 1000]
    """
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list[int]] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each bucket's contents back into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to the next digit
        placement *= RADIX
    return list_of_ints


if __name__ == "__main__":
    import doctest

    doctest.testmod()
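    # Hedged check (editor's addition): radix_sort assumes non-negative integers.
    assert radix_sort([170, 45, 75, 90, 802, 24, 2, 66]) == [2, 24, 45, 66, 75, 90, 170, 802]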
| 44
| 1
|
'''simple docstring'''
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Return the probability of exactly `successes` successes in `trials` Bernoulli trials."""
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 0 - 1")
    probability = (prob ** successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print("Probability of 2 successes out of 4 trials")
    print("with probability of 0.75 is:", end=" ")
    print(binomial_distribution(2, 4, 0.75))
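    # Hedged check (editor's addition): C(4, 2) * 0.75**2 * 0.25**2
    # = 6 * 0.5625 * 0.0625 = 0.2109375.
    assert abs(binomial_distribution(2, 4, 0.75) - 0.2109375) < 1e-12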
| 217
|
'''simple docstring'''
import numpy as np
def sigmoid(vector: np.array) -> np.array:
    return 1 / (1 + np.exp(-vector))


def gaussian_error_linear_unit(vector: np.array) -> np.array:
    # sigmoid approximation of GELU: x * sigmoid(1.702 * x)
    return vector * sigmoid(1.702 * vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
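    # Hedged check (editor's addition): sigmoid(0) = 0.5 and the GELU
    # approximation is exactly 0 at the origin.
    assert sigmoid(np.array([0.0]))[0] == 0.5
    assert gaussian_error_linear_unit(np.array([0.0]))[0] == 0.0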
| 217
| 1
|
'''simple docstring'''
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the decimal part of `number`, rounded to `digit_amount` digits when positive."""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)


if __name__ == "__main__":
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
| 162
|
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}
class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
| 162
| 1
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
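# Hedged usage sketch (editor's addition): instantiating the tool downloads the
# BLIP checkpoint on first use, so the example is left commented out;
# "photo.jpg" is a hypothetical local file.
#
# from PIL import Image
# captioner = ImageCaptioningTool()
# print(captioner(Image.open("photo.jpg")))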
| 296
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig()

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True  # attribute name reconstructed from DPTConfig
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "patch_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict;
        # the target key names below are reconstructed from the DPT model layout
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[config.hidden_size : config.hidden_size * 2, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[config.hidden_size : config.hidden_size * 2]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
A_ = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 296
| 1
|
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
| 51
|
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph):
    """Check whether a graph (adjacency list) is bipartite using DFS 2-coloring."""
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
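# Hedged check (editor's addition): a triangle (odd cycle) is not bipartite.
print(check_bipartite_dfs({0: [1, 2], 1: [0, 2], 2: [0, 1]}))  # expected: False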
| 218
| 0
|
'''simple docstring'''
def longest_distance(graph):
    """Print the number of vertices on the longest path in a DAG, via topological ordering."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
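# Hedged check (editor's addition): a simple three-vertex chain 0 -> 1 -> 2
# has longest distance 3 (vertices on the path).
longest_distance({0: [1], 1: [2], 2: []})  # prints: 3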
| 370
|
'''simple docstring'''
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"files", [
["full:README.md", "dataset_infos.json"],
["empty:README.md", "dataset_infos.json"],
["dataset_infos.json"],
["full:README.md"],
], )
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n  dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
"dataset_info", [
DatasetInfo(),
DatasetInfo(
description="foo", features=Features({"a": Value("int32" )} ), builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train"}], download_size=42, ),
], )
def test_dataset_info_dump_and_reload(tmp_path, dataset_info: DatasetInfo):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))
def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo", citation="bar", homepage="https://foo.bar", license="CC0", features=Features({"a": Value("int32")}), post_processed={}, supervised_keys=(), task_templates=[], builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train", "num_examples": 42}], download_checksums={}, download_size=1_337, post_processing_size=442, dataset_size=1_234, size_in_bytes=1_337 + 442 + 1_234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"dataset_infos_dict", [
DatasetInfosDict(),
DatasetInfosDict({"default": DatasetInfo()} ),
DatasetInfosDict({"my_config_name": DatasetInfo()} ),
DatasetInfosDict(
{
"default": DatasetInfo(
description="foo", features=Features({"a": Value("int32" )} ), builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train"}], download_size=42, )
} ),
DatasetInfosDict(
{
"v1": DatasetInfo(dataset_size=42 ),
"v2": DatasetInfo(dataset_size=1_337 ),
} ),
], )
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict: DatasetInfosDict):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
| 17
| 0
|
'''simple docstring'''
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first `length` hexagonal numbers, n * (2n - 1)."""
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
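    # Hedged check (editor's addition): the sequence starts 0, 1, 6, 15, 28.
    assert hexagonal_numbers(length=5) == [0, 1, 6, 15, 28]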
| 346
|
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum over subsets of nums with no two adjacent elements."""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
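    # Hedged check (editor's addition): from [1, 2, 3] the best non-adjacent
    # picks are 1 and 3, summing to 4.
    assert maximum_non_adjacent_sum([1, 2, 3]) == 4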
| 280
| 0
|
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}
class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 356
|
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch
    from torch import nn

    from transformers import MaskFormerSwinBackbone
    from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, path_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, out_features=self.out_features, out_indices=self.out_indices,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
a__ : List[Any] = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
a__ : Optional[int] = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
a__ : List[str] = False
a__ : int = False
a__ : str = False
a__ : str = False
a__ : Any = False
def a ( self : Optional[Any] ):
__UpperCAmelCase = MaskFormerSwinModelTester(self )
__UpperCAmelCase = ConfigTester(self , config_class=_lowercase , embed_dim=37 )
    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
            " `nn.DataParallel`"
        )
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    @unittest.skip("Swin does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("Swin does not support feedforward chunking")
    def test_feed_forward_chunking(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone")
    def test_save_load(self):  # editor's guess at the obfuscated method name
        pass
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim]
        )
def a ( self : str ):
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__UpperCAmelCase = True
self.check_hidden_states_output(_lowercase , _lowercase , _lowercase , _lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase = True
self.check_hidden_states_output(_lowercase , _lowercase , _lowercase , _lowercase )
def a ( self : Optional[Any] ):
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase = 3
__UpperCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__UpperCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__UpperCAmelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__UpperCAmelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__UpperCAmelCase = True
self.check_hidden_states_output(_lowercase , _lowercase , _lowercase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase = True
self.check_hidden_states_output(_lowercase , _lowercase , _lowercase , (padded_height, padded_width) )
@unittest.skip(reason='''MaskFormerSwin doesn\'t have pretrained checkpoints''' )
def a ( self : Any ):
pass
@unittest.skip(reason='''This will be fixed once MaskFormerSwin is replaced by native Swin''' )
def a ( self : str ):
pass
@unittest.skip(reason='''This will be fixed once MaskFormerSwin is replaced by native Swin''' )
def a ( self : Tuple ):
pass
def a ( self : Tuple ):
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(t):
    t[t != t] = 0  # NaN != NaN, so this zeroes out every NaN entry in-place
    return t
def check_equivalence(_lowercase : List[Any] , _lowercase : Any , _lowercase : str , _lowercase : List[str]={} ):
with torch.no_grad():
__UpperCAmelCase = model(**_lowercase , return_dict=_lowercase , **_lowercase )
__UpperCAmelCase = model(**_lowercase , return_dict=_lowercase , **_lowercase ).to_tuple()
def recursive_check(_lowercase : Dict , _lowercase : Optional[Any] ):
if isinstance(_lowercase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_lowercase , _lowercase ):
recursive_check(_lowercase , _lowercase )
elif isinstance(_lowercase , _lowercase ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(_lowercase , _lowercase )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(_lowercase ) , set_nan_tensor_to_zero(_lowercase ) , atol=1E-5 ) , msg=(
'''Tuple and dict output are not equal. Difference:'''
F''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'''
F''' {torch.isnan(_lowercase ).any()} and `inf`: {torch.isinf(_lowercase ).any()}. Dict has'''
F''' `nan`: {torch.isnan(_lowercase ).any()} and `inf`: {torch.isinf(_lowercase )}.'''
) , )
recursive_check(_lowercase , _lowercase )
for model_class in self.all_model_classes:
__UpperCAmelCase = model_class(_lowercase )
model.to(_lowercase )
model.eval()
__UpperCAmelCase = self._prepare_for_class(_lowercase , _lowercase )
__UpperCAmelCase = self._prepare_for_class(_lowercase , _lowercase )
check_equivalence(_lowercase , _lowercase , _lowercase )
__UpperCAmelCase = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
__UpperCAmelCase = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
check_equivalence(_lowercase , _lowercase , _lowercase )
__UpperCAmelCase = self._prepare_for_class(_lowercase , _lowercase )
__UpperCAmelCase = self._prepare_for_class(_lowercase , _lowercase )
check_equivalence(_lowercase , _lowercase , _lowercase , {'''output_hidden_states''': True} )
__UpperCAmelCase = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
__UpperCAmelCase = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
check_equivalence(_lowercase , _lowercase , _lowercase , {'''output_hidden_states''': True} )
@require_torch
class _UpperCAmelCase ( unittest.TestCase , _lowerCAmelCase ):
a__ : Optional[Any] = (MaskFormerSwinBackbone,) if is_torch_available() else ()
a__ : List[str] = MaskFormerSwinConfig
def a ( self : List[str] ):
__UpperCAmelCase = MaskFormerSwinModelTester(self )
def a ( self : List[Any] ):
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase = inputs_dict['''pixel_values'''].shape[0]
for backbone_class in self.all_model_classes:
__UpperCAmelCase = backbone_class(_lowercase )
backbone.to(_lowercase )
backbone.eval()
__UpperCAmelCase = backbone(**_lowercase )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , _lowercase )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertEqual(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
__UpperCAmelCase = backbone(**_lowercase , output_hidden_states=_lowercase )
self.assertIsNotNone(outputs.hidden_states )
self.assertEqual(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = hidden_state.shape
self.assertEqual((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
__UpperCAmelCase = backbone(**_lowercase , output_attentions=_lowercase )
self.assertIsNotNone(outputs.attentions )
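# Hedged usage sketch of the backbone contract the test above verifies. It
# assumes the upstream classes `MaskFormerSwinBackbone` / `MaskFormerSwinConfig`
# are importable from `transformers`; shapes depend on the default config.
if __name__ == "__main__":
    import torch
    from transformers import MaskFormerSwinBackbone, MaskFormerSwinConfig

    backbone = MaskFormerSwinBackbone(MaskFormerSwinConfig())
    outputs = backbone(torch.randn(1, 3, 224, 224))
    print([tuple(fm.shape) for fm in outputs.feature_maps])  # one map per out_feature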
| 86
| 0
|
"""simple docstring"""
def solution(power: int = 1_000) -> int:
    """Return the sum of the decimal digits of 2**power."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
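# Illustrative self-check for solution() (added; values verified by hand:
# 2**15 = 32768 whose digits sum to 26, and the classic 2**1000 digit sum 1366).
if __name__ == "__main__":
    assert solution(15) == 26
    assert solution(1_000) == 1_366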
| 217
|
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__A = logging.get_logger(__name__)
__A = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
__A = {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
__A = {"facebook/blenderbot-3B": 128}
class snake_case ( __snake_case ):
SCREAMING_SNAKE_CASE_ : Dict = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : str = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : Optional[int] = ["""input_ids""", """attention_mask"""]
SCREAMING_SNAKE_CASE_ : List[Any] = BlenderbotTokenizer
def __init__( self : Any , UpperCamelCase__ : str=None , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : Optional[Any]="replace" , UpperCamelCase__ : int="<s>" , UpperCamelCase__ : str="</s>" , UpperCamelCase__ : List[Any]="</s>" , UpperCamelCase__ : Union[str, Any]="<s>" , UpperCamelCase__ : List[str]="<unk>" , UpperCamelCase__ : Dict="<pad>" , UpperCamelCase__ : List[Any]="<mask>" , UpperCamelCase__ : Any=False , UpperCamelCase__ : int=True , **UpperCamelCase__ : Union[str, Any] , )-> int:
'''simple docstring'''
super().__init__(
UpperCamelCase__ , UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , errors=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ , **UpperCamelCase__ , )
__lowerCAmelCase: Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get("add_prefix_space" , UpperCamelCase__) != add_prefix_space:
__lowerCAmelCase: Dict = getattr(UpperCamelCase__ , pre_tok_state.pop("type"))
__lowerCAmelCase: int = add_prefix_space
__lowerCAmelCase: str = pre_tok_class(**UpperCamelCase__)
__lowerCAmelCase: List[Any] = add_prefix_space
__lowerCAmelCase: Union[str, Any] = "post_processor"
__lowerCAmelCase: Tuple = getattr(self.backend_tokenizer , UpperCamelCase__ , UpperCamelCase__)
if tokenizer_component_instance:
__lowerCAmelCase: List[Any] = json.loads(tokenizer_component_instance.__getstate__())
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
__lowerCAmelCase: str = tuple(state["sep"])
if "cls" in state:
__lowerCAmelCase: Any = tuple(state["cls"])
__lowerCAmelCase: int = False
if state.get("add_prefix_space" , UpperCamelCase__) != add_prefix_space:
__lowerCAmelCase: Dict = add_prefix_space
__lowerCAmelCase: Optional[Any] = True
if state.get("trim_offsets" , UpperCamelCase__) != trim_offsets:
__lowerCAmelCase: Dict = trim_offsets
__lowerCAmelCase: Tuple = True
if changes_to_apply:
__lowerCAmelCase: Dict = getattr(UpperCamelCase__ , state.pop("type"))
__lowerCAmelCase: str = component_class(**UpperCamelCase__)
setattr(self.backend_tokenizer , UpperCamelCase__ , UpperCamelCase__)
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def lowercase_ ( self : Optional[Any])-> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet.")
return None
return str(self._mask_token)
@mask_token.setter
def lowercase_ ( self : str , UpperCamelCase__ : Union[str, Any])-> Tuple:
'''simple docstring'''
__lowerCAmelCase: int = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__) if isinstance(UpperCamelCase__ , UpperCamelCase__) else value
__lowerCAmelCase: Any = value
def lowercase_ ( self : Union[str, Any] , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Optional[int])-> BatchEncoding:
'''simple docstring'''
__lowerCAmelCase: List[str] = kwargs.get("is_split_into_words" , UpperCamelCase__)
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*UpperCamelCase__ , **UpperCamelCase__)
def lowercase_ ( self : Any , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : int)-> BatchEncoding:
'''simple docstring'''
__lowerCAmelCase: List[Any] = kwargs.get("is_split_into_words" , UpperCamelCase__)
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*UpperCamelCase__ , **UpperCamelCase__)
def lowercase_ ( self : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None)-> Tuple[str]:
'''simple docstring'''
__lowerCAmelCase: Optional[int] = self._tokenizer.model.save(UpperCamelCase__ , name=UpperCamelCase__)
return tuple(UpperCamelCase__)
def lowercase_ ( self : str , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None)-> List[int]:
'''simple docstring'''
__lowerCAmelCase: Tuple = [self.sep_token_id]
__lowerCAmelCase: Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def lowercase_ ( self : Union[str, Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None)-> List[Any]:
'''simple docstring'''
return token_ids_a + [self.eos_token_id]
def lowercase_ ( self : Dict , UpperCamelCase__ : "Conversation")-> List[int]:
'''simple docstring'''
__lowerCAmelCase: str = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(" " + text)
else:
# Generated responses should contain them already.
inputs.append(UpperCamelCase__)
__lowerCAmelCase: Optional[int] = " ".join(UpperCamelCase__)
__lowerCAmelCase: Tuple = self.encode(UpperCamelCase__)
if len(UpperCamelCase__) > self.model_max_length:
__lowerCAmelCase: int = input_ids[-self.model_max_length :]
logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
return input_ids
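# A minimal usage sketch for the fast tokenizer above (hedged: requires network
# access to fetch the checkpoint; the final-token check mirrors
# build_inputs_with_special_tokens, which appends `eos_token_id`).
if __name__ == "__main__":
    from transformers import BlenderbotTokenizerFast

    tok = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
    enc = tok("Hello there!")
    assert enc.input_ids[-1] == tok.eos_token_id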
| 217
| 1
|
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
snake_case = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : List[str] = ['''input_features''']
def __init__( self : Any , UpperCAmelCase_ : List[str]=80 , UpperCAmelCase_ : str=1_6000 , UpperCAmelCase_ : Optional[int]=160 , UpperCAmelCase_ : int=30 , UpperCAmelCase_ : Union[str, Any]=400 , UpperCAmelCase_ : Optional[int]=0.0 , UpperCAmelCase_ : Tuple=False , **UpperCAmelCase_ : Tuple , ):
super().__init__(
feature_size=UpperCAmelCase_ , sampling_rate=UpperCAmelCase_ , padding_value=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , **UpperCAmelCase_ , )
SCREAMING_SNAKE_CASE : Any = n_fft
SCREAMING_SNAKE_CASE : List[Any] = hop_length
SCREAMING_SNAKE_CASE : List[str] = chunk_length
SCREAMING_SNAKE_CASE : Optional[Any] = chunk_length * sampling_rate
SCREAMING_SNAKE_CASE : List[Any] = self.n_samples // hop_length
SCREAMING_SNAKE_CASE : Union[str, Any] = sampling_rate
SCREAMING_SNAKE_CASE : Optional[Any] = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=UpperCAmelCase_ , min_frequency=0.0 , max_frequency=8000.0 , sampling_rate=UpperCAmelCase_ , norm="slaney" , mel_scale="slaney" , )
def _A ( self : Optional[int] , UpperCAmelCase_ : np.array ):
SCREAMING_SNAKE_CASE : List[Any] = spectrogram(
UpperCAmelCase_ , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="log10" , )
SCREAMING_SNAKE_CASE : List[str] = log_spec[:, :-1]
SCREAMING_SNAKE_CASE : Tuple = np.maximum(UpperCAmelCase_ , log_spec.max() - 8.0 )
SCREAMING_SNAKE_CASE : Dict = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def _A ( UpperCAmelCase_ : List[np.ndarray] , UpperCAmelCase_ : List[np.ndarray] , UpperCAmelCase_ : float = 0.0 ):
if attention_mask is not None:
SCREAMING_SNAKE_CASE : Union[str, Any] = np.array(UpperCAmelCase_ , np.intaa )
SCREAMING_SNAKE_CASE : int = []
for vector, length in zip(UpperCAmelCase_ , attention_mask.sum(-1 ) ):
SCREAMING_SNAKE_CASE : Dict = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
SCREAMING_SNAKE_CASE : Optional[Any] = padding_value
normed_input_values.append(UpperCAmelCase_ )
else:
SCREAMING_SNAKE_CASE : Dict = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def __call__( self : List[Any] , UpperCAmelCase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[Union[str, TensorType]] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[str] = "max_length" , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[bool] = None , **UpperCAmelCase_ : Union[str, Any] , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
SCREAMING_SNAKE_CASE : Any = isinstance(UpperCAmelCase_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
SCREAMING_SNAKE_CASE : List[Any] = is_batched_numpy or (
isinstance(UpperCAmelCase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
SCREAMING_SNAKE_CASE : int = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(UpperCAmelCase_ , np.ndarray ):
SCREAMING_SNAKE_CASE : List[str] = np.asarray(UpperCAmelCase_ , dtype=np.floataa )
elif isinstance(UpperCAmelCase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE : str = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE : Any = [np.asarray([raw_speech] ).T]
SCREAMING_SNAKE_CASE : Optional[Any] = BatchFeature({"input_features": raw_speech} )
# convert into correct format for padding
SCREAMING_SNAKE_CASE : Tuple = self.pad(
UpperCAmelCase_ , padding=UpperCAmelCase_ , max_length=max_length if max_length else self.n_samples , truncation=UpperCAmelCase_ , pad_to_multiple_of=UpperCAmelCase_ , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
SCREAMING_SNAKE_CASE : List[str] = self.zero_mean_unit_var_norm(
padded_inputs["input_features"] , attention_mask=padded_inputs["attention_mask"] , padding_value=self.padding_value , )
SCREAMING_SNAKE_CASE : int = np.stack(padded_inputs["input_features"] , axis=0 )
# make sure list is in array format
SCREAMING_SNAKE_CASE : Union[str, Any] = padded_inputs.get("input_features" ).transpose(2 , 0 , 1 )
SCREAMING_SNAKE_CASE : int = [self._np_extract_fbank_features(UpperCAmelCase_ ) for waveform in input_features[0]]
if isinstance(input_features[0] , UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE : List[str] = [np.asarray(UpperCAmelCase_ , dtype=np.floataa ) for feature in input_features]
else:
SCREAMING_SNAKE_CASE : int = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
SCREAMING_SNAKE_CASE : List[Any] = padded_inputs["attention_mask"][:, :: self.hop_length]
if return_tensors is not None:
SCREAMING_SNAKE_CASE : Optional[Any] = padded_inputs.convert_to_tensors(UpperCAmelCase_ )
return padded_inputs
def _A ( self : str ):
SCREAMING_SNAKE_CASE : Optional[Any] = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE : List[Any] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
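# The extractor above mirrors transformers' WhisperFeatureExtractor; a hedged
# smoke test using the library class (assumes `transformers` is installed):
if __name__ == "__main__":
    import numpy as np
    from transformers import WhisperFeatureExtractor

    fe = WhisperFeatureExtractor()
    speech = np.zeros(16_000, dtype=np.float32)  # one second of silence
    feats = fe(speech, sampling_rate=16_000, return_tensors="np").input_features
    print(feats.shape)  # (1, 80, 3000): 80 mel bins over the 30 s padded window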
| 355
|
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
assert isinstance(lowercase , lowercase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = tmp_path / "cache"
SCREAMING_SNAKE_CASE : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE : List[str] = ParquetDatasetReader(lowercase , cache_dir=lowercase , keep_in_memory=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = tmp_path / "cache"
SCREAMING_SNAKE_CASE : Optional[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
SCREAMING_SNAKE_CASE : Any = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE : Optional[int] = (
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE : List[str] = ParquetDatasetReader(lowercase , features=lowercase , cache_dir=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = tmp_path / "cache"
SCREAMING_SNAKE_CASE : Any = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
SCREAMING_SNAKE_CASE : str = ParquetDatasetReader(lowercase , cache_dir=lowercase , split=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
if issubclass(lowercase , lowercase ):
SCREAMING_SNAKE_CASE : Optional[Any] = parquet_path
elif issubclass(lowercase , lowercase ):
SCREAMING_SNAKE_CASE : Union[str, Any] = [parquet_path]
SCREAMING_SNAKE_CASE : Dict = tmp_path / "cache"
SCREAMING_SNAKE_CASE : List[str] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
SCREAMING_SNAKE_CASE : Tuple = ParquetDatasetReader(lowercase , cache_dir=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
def lowerCamelCase__ ( lowercase , lowercase , lowercase=("train",) ):
"""simple docstring"""
assert isinstance(lowercase , lowercase )
for split in splits:
SCREAMING_SNAKE_CASE : Optional[int] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = tmp_path / "cache"
SCREAMING_SNAKE_CASE : Dict = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE : str = ParquetDatasetReader(
{"train": parquet_path} , cache_dir=lowercase , keep_in_memory=lowercase ).read()
_check_parquet_datasetdict(lowercase , lowercase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = tmp_path / "cache"
SCREAMING_SNAKE_CASE : Optional[int] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
SCREAMING_SNAKE_CASE : Dict = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE : str = (
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE : Optional[Any] = ParquetDatasetReader({"train": parquet_path} , features=lowercase , cache_dir=lowercase ).read()
_check_parquet_datasetdict(lowercase , lowercase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
if split:
SCREAMING_SNAKE_CASE : Any = {split: parquet_path}
else:
SCREAMING_SNAKE_CASE : Tuple = "train"
SCREAMING_SNAKE_CASE : int = {"train": parquet_path, "test": parquet_path}
SCREAMING_SNAKE_CASE : Dict = tmp_path / "cache"
SCREAMING_SNAKE_CASE : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
SCREAMING_SNAKE_CASE : int = ParquetDatasetReader(lowercase , cache_dir=lowercase ).read()
_check_parquet_datasetdict(lowercase , lowercase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = ParquetDatasetWriter(lowercase , tmp_path / "foo.parquet" )
assert writer.write() > 0
SCREAMING_SNAKE_CASE : Tuple = pq.ParquetFile(tmp_path / "foo.parquet" )
SCREAMING_SNAKE_CASE : List[Any] = pf.read()
assert dataset.data.table == output_table
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = str(shared_datadir / "test_image_rgb.jpg" )
SCREAMING_SNAKE_CASE : Union[str, Any] = {"image": [image_path]}
SCREAMING_SNAKE_CASE : Union[str, Any] = Features({"image": Image()} )
SCREAMING_SNAKE_CASE : int = Dataset.from_dict(lowercase , features=lowercase )
SCREAMING_SNAKE_CASE : List[str] = ParquetDatasetWriter(lowercase , tmp_path / "foo.parquet" )
assert writer.write() > 0
SCREAMING_SNAKE_CASE : str = Dataset.from_parquet(str(tmp_path / "foo.parquet" ) )
assert dataset.features == reloaded_dataset.features
SCREAMING_SNAKE_CASE : Any = ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=lowercase ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"feature, expected" , [
(Features({"foo": Value("int32" )} ), None),
(Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
assert get_writer_batch_size(lowercase ) == expected
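# A minimal parquet round-trip outside pytest (illustrative; the /tmp path is
# an assumption, not part of the test suite):
if __name__ == "__main__":
    from datasets import Dataset

    ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
    ds.to_parquet("/tmp/demo.parquet")
    assert Dataset.from_parquet("/tmp/demo.parquet").num_rows == 2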
| 319
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json""",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class UpperCamelCase__ ( lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Any = "convbert"
def __init__( self : Union[str, Any] ,lowerCamelCase__ : List[Any]=30522 ,lowerCamelCase__ : Optional[Any]=768 ,lowerCamelCase__ : Union[str, Any]=12 ,lowerCamelCase__ : str=12 ,lowerCamelCase__ : Union[str, Any]=3072 ,lowerCamelCase__ : Any="gelu" ,lowerCamelCase__ : List[Any]=0.1 ,lowerCamelCase__ : Any=0.1 ,lowerCamelCase__ : Dict=512 ,lowerCamelCase__ : str=2 ,lowerCamelCase__ : Any=0.02 ,lowerCamelCase__ : Optional[int]=1e-1_2 ,lowerCamelCase__ : int=1 ,lowerCamelCase__ : Union[str, Any]=0 ,lowerCamelCase__ : int=2 ,lowerCamelCase__ : Union[str, Any]=768 ,lowerCamelCase__ : Any=2 ,lowerCamelCase__ : Optional[Any]=9 ,lowerCamelCase__ : Union[str, Any]=1 ,lowerCamelCase__ : Any=None ,**lowerCamelCase__ : Union[str, Any] ,) -> str:
'''simple docstring'''
super().__init__(
pad_token_id=lowerCamelCase__ ,bos_token_id=lowerCamelCase__ ,eos_token_id=lowerCamelCase__ ,**lowerCamelCase__ ,)
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = embedding_size
SCREAMING_SNAKE_CASE = head_ratio
SCREAMING_SNAKE_CASE = conv_kernel_size
SCREAMING_SNAKE_CASE = num_groups
SCREAMING_SNAKE_CASE = classifier_dropout
class UpperCamelCase__ ( lowerCAmelCase_ ):
'''simple docstring'''
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
SCREAMING_SNAKE_CASE = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
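# Hedged example of instantiating the config above via its upstream equivalent
# (`ConvBertConfig` in transformers); defaults match the __init__ signature:
if __name__ == "__main__":
    from transformers import ConvBertConfig

    cfg = ConvBertConfig(hidden_size=256, num_attention_heads=4)
    print(cfg.model_type)  # "convbert"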
| 296
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
"""microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}
class UpperCamelCase__ ( lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Dict = "git_vision_model"
def __init__( self : List[Any] ,lowerCamelCase__ : Dict=768 ,lowerCamelCase__ : Union[str, Any]=3072 ,lowerCamelCase__ : Optional[int]=12 ,lowerCamelCase__ : Tuple=12 ,lowerCamelCase__ : Tuple=3 ,lowerCamelCase__ : Optional[Any]=224 ,lowerCamelCase__ : Union[str, Any]=16 ,lowerCamelCase__ : List[Any]="quick_gelu" ,lowerCamelCase__ : Optional[Any]=1e-5 ,lowerCamelCase__ : str=0.0 ,lowerCamelCase__ : Optional[int]=0.02 ,**lowerCamelCase__ : Union[str, Any] ,) -> Optional[int]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = patch_size
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = attention_dropout
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = hidden_act
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Tuple ,lowerCamelCase__ : Union[str, os.PathLike] ,**lowerCamelCase__ : int ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(lowerCamelCase__ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = cls.get_config_dict(lowerCamelCase__ ,**lowerCamelCase__ )
# get the vision config dict if we are loading from GITConfig
if config_dict.get("""model_type""" ) == "git":
SCREAMING_SNAKE_CASE = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowerCamelCase__ ,**lowerCamelCase__ )
class UpperCamelCase__ ( lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Dict = "git"
def __init__( self : Optional[int] ,lowerCamelCase__ : int=None ,lowerCamelCase__ : str=30522 ,lowerCamelCase__ : Tuple=768 ,lowerCamelCase__ : Union[str, Any]=6 ,lowerCamelCase__ : str=12 ,lowerCamelCase__ : List[str]=3072 ,lowerCamelCase__ : Dict="gelu" ,lowerCamelCase__ : Tuple=0.1 ,lowerCamelCase__ : Any=0.1 ,lowerCamelCase__ : List[str]=1024 ,lowerCamelCase__ : List[str]=0.02 ,lowerCamelCase__ : str=1e-1_2 ,lowerCamelCase__ : Optional[int]=0 ,lowerCamelCase__ : Optional[int]="absolute" ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : str=False ,lowerCamelCase__ : int=101 ,lowerCamelCase__ : int=102 ,lowerCamelCase__ : Dict=None ,**lowerCamelCase__ : List[Any] ,) -> Optional[Any]:
'''simple docstring'''
super().__init__(bos_token_id=lowerCamelCase__ ,eos_token_id=lowerCamelCase__ ,pad_token_id=lowerCamelCase__ ,**lowerCamelCase__ )
if vision_config is None:
SCREAMING_SNAKE_CASE = {}
logger.info("""vision_config is None. initializing the GitVisionConfig with default values.""" )
SCREAMING_SNAKE_CASE = GitVisionConfig(**lowerCamelCase__ )
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = position_embedding_type
SCREAMING_SNAKE_CASE = use_cache
SCREAMING_SNAKE_CASE = tie_word_embeddings
SCREAMING_SNAKE_CASE = num_image_with_embedding
SCREAMING_SNAKE_CASE = bos_token_id
SCREAMING_SNAKE_CASE = eos_token_id
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE = self.vision_config.to_dict()
SCREAMING_SNAKE_CASE = self.__class__.model_type
return output
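# Hedged usage of the composed config above via its upstream equivalent
# (`GitConfig` in transformers); a missing vision_config is filled with
# defaults, as the __init__ above shows:
if __name__ == "__main__":
    from transformers import GitConfig

    cfg = GitConfig()
    print(cfg.vision_config.patch_size)  # 16 by default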
| 296
| 1
|
"""simple docstring"""
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(arr: Sequence[float], low: int, high: int) -> tuple[int | None, int | None, float]:
    """Divide-and-conquer maximum subarray: returns (low index, high index, sum)."""
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]
    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum


def max_cross_sum(arr: Sequence[float], low: int, mid: int, high: int) -> tuple[int, int, float]:
    """Best subarray that crosses the midpoint: scan left and right from mid."""
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1
    summ: int | float = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i
    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i
    return max_left, max_right, (left_sum + right_sum)


def time_max_subarray(input_size: int) -> float:
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start


def plot_runtimes() -> None:
    input_sizes = [10, 100, 1000, 1_0000, 5_0000, 10_0000, 20_0000, 30_0000, 40_0000, 50_0000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
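# Minimal correctness check for max_subarray above (illustrative; the array is
# the classic CLRS example whose maximum subarray spans indices 7..10, sum 43).
if __name__ == "__main__":
    clrs = [13, -3, -25, 20, -3, -16, -23, 18, 20, -7, 12, -5, -22, 15, -4, 7]
    assert max_subarray(clrs, 0, len(clrs) - 1) == (7, 10, 43)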
| 316
|
"""simple docstring"""
def add(first: int, second: int) -> int:
    """Add two non-negative integers using only bitwise operations."""
    while second != 0:
        c = first & second  # carry bits
        first ^= second     # sum without carry
        second = c << 1     # carry shifted into position
    return first


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
    print(f"{add(first, second) = }")
| 316
| 1
|
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.314_4598  # J / (mol * K)


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """Root-mean-square speed of a gas molecule: v_rms = sqrt(3RT / M)."""
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # example
    temperature = 3_0_0
    molar_mass = 2_8
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"""Vrms of Nitrogen gas at 300 K is {vrms} m/s""")
| 109
|
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class _lowerCAmelCase :
"""simple docstring"""
@staticmethod
def _lowercase ( *UpperCAmelCase__ : Tuple, **UpperCAmelCase__ : List[Any] ):
pass
def _A ( UpperCamelCase_ : Union[str, Any]) -> Any:
'''simple docstring'''
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
_a = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Tuple = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def _lowercase ( self : Optional[Any], UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : Dict, UpperCAmelCase__ : Optional[Any] ):
__lowercase = pipeline(
"document-question-answering", model=UpperCAmelCase__, tokenizer=UpperCAmelCase__, image_processor=UpperCAmelCase__ )
__lowercase = INVOICE_URL
__lowercase = list(zip(*apply_tesseract(load_image(UpperCAmelCase__ ), UpperCAmelCase__, "" ) ) )
__lowercase = "What is the placebo?"
__lowercase = [
{
"image": load_image(UpperCAmelCase__ ),
"question": question,
},
{
"image": image,
"question": question,
},
{
"image": image,
"question": question,
"word_boxes": word_boxes,
},
]
return dqa_pipeline, examples
def _lowercase ( self : int, UpperCAmelCase__ : Tuple, UpperCAmelCase__ : Any ):
__lowercase = dqa_pipeline(UpperCAmelCase__, top_k=2 )
self.assertEqual(
UpperCAmelCase__, [
[
{"score": ANY(UpperCAmelCase__ ), "answer": ANY(UpperCAmelCase__ ), "start": ANY(UpperCAmelCase__ ), "end": ANY(UpperCAmelCase__ )},
{"score": ANY(UpperCAmelCase__ ), "answer": ANY(UpperCAmelCase__ ), "start": ANY(UpperCAmelCase__ ), "end": ANY(UpperCAmelCase__ )},
]
]
* 3, )
@require_torch
@require_detectrona
@require_pytesseract
def _lowercase ( self : Dict ):
__lowercase = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2" )
__lowercase = INVOICE_URL
__lowercase = "How many cats are there?"
__lowercase = [
{"score": 0.0_001, "answer": "oy 2312/2019", "start": 3_8, "end": 3_9},
{"score": 0.0_001, "answer": "oy 2312/2019 DUE", "start": 3_8, "end": 4_0},
]
__lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 )
self.assertEqual(nested_simplify(UpperCAmelCase__, decimals=4 ), UpperCAmelCase__ )
__lowercase = dqa_pipeline({"image": image, "question": question}, top_k=2 )
self.assertEqual(nested_simplify(UpperCAmelCase__, decimals=4 ), UpperCAmelCase__ )
# This image does not detect ANY text in it, meaning layoutlmv2 should fail.
# Empty answer probably
__lowercase = "./tests/fixtures/tests_samples/COCO/000000039769.png"
__lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 )
self.assertEqual(UpperCAmelCase__, [] )
# We can optionnally pass directly the words and bounding boxes
__lowercase = "./tests/fixtures/tests_samples/COCO/000000039769.png"
__lowercase = []
__lowercase = []
__lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, words=UpperCAmelCase__, boxes=UpperCAmelCase__, top_k=2 )
self.assertEqual(UpperCAmelCase__, [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def _lowercase ( self : List[str] ):
__lowercase = pipeline(
"document-question-answering", model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa", revision="9977165", )
__lowercase = INVOICE_URL
__lowercase = "What is the invoice number?"
__lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.9_944, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.0_009, "answer": "us-001", "start": 1_6, "end": 1_6},
], )
__lowercase = dqa_pipeline({"image": image, "question": question}, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.9_944, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.0_009, "answer": "us-001", "start": 1_6, "end": 1_6},
], )
__lowercase = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
[
{"score": 0.9_944, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.0_009, "answer": "us-001", "start": 1_6, "end": 1_6},
],
]
* 2, )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def _lowercase ( self : Dict ):
__lowercase = pipeline(
"document-question-answering", model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa", revision="9977165", max_seq_len=5_0, )
__lowercase = INVOICE_URL
__lowercase = "What is the invoice number?"
__lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.9_974, "answer": "1110212019", "start": 2_3, "end": 2_3},
{"score": 0.9_948, "answer": "us-001", "start": 1_6, "end": 1_6},
], )
__lowercase = dqa_pipeline({"image": image, "question": question}, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.9_974, "answer": "1110212019", "start": 2_3, "end": 2_3},
{"score": 0.9_948, "answer": "us-001", "start": 1_6, "end": 1_6},
], )
__lowercase = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
[
{"score": 0.9_974, "answer": "1110212019", "start": 2_3, "end": 2_3},
{"score": 0.9_948, "answer": "us-001", "start": 1_6, "end": 1_6},
]
]
* 2, )
@slow
@require_torch
@require_pytesseract
@require_vision
def _lowercase ( self : Optional[Any] ):
__lowercase = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=UpperCAmelCase__ )
__lowercase = pipeline(
"document-question-answering", model="impira/layoutlm-document-qa", tokenizer=UpperCAmelCase__, revision="3dc6de3", )
__lowercase = INVOICE_URL
__lowercase = "What is the invoice number?"
__lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.4_251, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.0_819, "answer": "1110212019", "start": 2_3, "end": 2_3},
], )
__lowercase = dqa_pipeline({"image": image, "question": question}, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.4_251, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.0_819, "answer": "1110212019", "start": 2_3, "end": 2_3},
], )
__lowercase = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
[
{"score": 0.4_251, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.0_819, "answer": "1110212019", "start": 2_3, "end": 2_3},
]
]
* 2, )
__lowercase = list(zip(*apply_tesseract(load_image(UpperCAmelCase__ ), UpperCAmelCase__, "" ) ) )
# This model should also work if `image` is set to None
__lowercase = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.4_251, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.0_819, "answer": "1110212019", "start": 2_3, "end": 2_3},
], )
@slow
@require_torch
@require_pytesseract
@require_vision
def _lowercase ( self : Union[str, Any] ):
__lowercase = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=UpperCAmelCase__ )
__lowercase = pipeline(
"document-question-answering", model="impira/layoutlm-document-qa", tokenizer=UpperCAmelCase__, revision="3dc6de3", max_seq_len=5_0, )
__lowercase = INVOICE_URL
__lowercase = "What is the invoice number?"
__lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.9_999, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.9_998, "answer": "us-001", "start": 1_6, "end": 1_6},
], )
__lowercase = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
[
{"score": 0.9_999, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.9_998, "answer": "us-001", "start": 1_6, "end": 1_6},
]
]
* 2, )
__lowercase = list(zip(*apply_tesseract(load_image(UpperCAmelCase__ ), UpperCAmelCase__, "" ) ) )
# This model should also work if `image` is set to None
__lowercase = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.9_999, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.9_998, "answer": "us-001", "start": 1_6, "end": 1_6},
], )
@slow
@require_torch
def _lowercase ( self : Dict ):
__lowercase = pipeline(
"document-question-answering", model="naver-clova-ix/donut-base-finetuned-docvqa", tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ), feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa", )
__lowercase = INVOICE_URL
__lowercase = "What is the invoice number?"
__lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 )
self.assertEqual(nested_simplify(UpperCAmelCase__, decimals=4 ), [{"answer": "us-001"}] )
@require_tf
@unittest.skip("Document question answering not implemented in TF" )
def _lowercase ( self : List[Any] ):
pass
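# A hedged, minimal pipeline call mirroring the slow tests above (downloads the
# checkpoint and needs pytesseract installed for OCR):
if __name__ == "__main__":
    from transformers import pipeline

    dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
    print(dqa(image=INVOICE_URL, question="What is the invoice number?", top_k=1))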
| 17
| 0
|
import argparse
import os
import re
UpperCamelCase__ = 'src/diffusers'
# Pattern that looks at the indentation in a line.
UpperCamelCase__ = re.compile(R'^(\s*)\S')
# Pattern that matches `"key":" and puts `key` in group 0.
UpperCamelCase__ = re.compile(R'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
UpperCamelCase__ = re.compile(R'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
UpperCamelCase__ = re.compile(R'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
UpperCamelCase__ = re.compile(R'\[([^\]]+)\]')
def lowerCAmelCase_ ( __A ) -> str:
'''simple docstring'''
UpperCAmelCase__ = _re_indent.search(__A )
return "" if search is None else search.groups()[0]
def lowerCAmelCase_ ( __A, __A="", __A=None, __A=None ) -> str:
'''simple docstring'''
UpperCAmelCase__ = 0
UpperCAmelCase__ = code.split("\n" )
if start_prompt is not None:
while not lines[index].startswith(__A ):
index += 1
UpperCAmelCase__ = ["\n".join(lines[:index] )]
else:
UpperCAmelCase__ = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
UpperCAmelCase__ = [lines[index]]
index += 1
while index < len(__A ) and (end_prompt is None or not lines[index].startswith(__A )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(__A ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + " " ):
current_block.append(lines[index] )
blocks.append("\n".join(__A ) )
if index < len(__A ) - 1:
UpperCAmelCase__ = [lines[index + 1]]
index += 1
else:
UpperCAmelCase__ = []
else:
blocks.append("\n".join(__A ) )
UpperCAmelCase__ = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(__A ) > 0:
blocks.append("\n".join(__A ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(__A ):
blocks.append("\n".join(lines[index:] ) )
return blocks
def lowerCAmelCase_ ( __A ) -> Tuple:
'''simple docstring'''
def _inner(__A ):
return key(__A ).lower().replace("_", "" )
return _inner
def lowerCAmelCase_ ( __A, __A=None ) -> List[str]:
'''simple docstring'''
def noop(__A ):
return x
if key is None:
UpperCAmelCase__ = noop
# Constants are all uppercase, they go first.
UpperCAmelCase__ = [obj for obj in objects if key(__A ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
UpperCAmelCase__ = [obj for obj in objects if key(__A )[0].isupper() and not key(__A ).isupper()]
# Functions begin with a lowercase, they go last.
UpperCAmelCase__ = [obj for obj in objects if not key(__A )[0].isupper()]
UpperCAmelCase__ = ignore_underscore(__A )
return sorted(__A, key=__A ) + sorted(__A, key=__A ) + sorted(__A, key=__A )
def lowerCAmelCase_ ( __A ) -> Optional[Any]:
'''simple docstring'''
def _replace(__A ):
UpperCAmelCase__ = match.groups()[0]
if "," not in imports:
return f"""[{imports}]"""
UpperCAmelCase__ = [part.strip().replace("\"", "" ) for part in imports.split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
UpperCAmelCase__ = keys[:-1]
return "[" + ", ".join([f"""\"{k}\"""" for k in sort_objects(__A )] ) + "]"
UpperCAmelCase__ = import_statement.split("\n" )
if len(__A ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
UpperCAmelCase__ = 2 if lines[1].strip() == "[" else 1
UpperCAmelCase__ = [(i, _re_strip_line.search(__A ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
UpperCAmelCase__ = sort_objects(__A, key=lambda __A : x[1] )
UpperCAmelCase__ = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(__A ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
UpperCAmelCase__ = _re_bracket_content.sub(_replace, lines[1] )
else:
UpperCAmelCase__ = [part.strip().replace("\"", "" ) for part in lines[1].split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
UpperCAmelCase__ = keys[:-1]
UpperCAmelCase__ = get_indent(lines[1] ) + ", ".join([f"""\"{k}\"""" for k in sort_objects(__A )] )
return "\n".join(__A )
else:
# Finally we have to deal with imports fitting on one line
UpperCAmelCase__ = _re_bracket_content.sub(_replace, __A )
return import_statement
def lowerCAmelCase_ ( __A, __A=True ) -> Any:
'''simple docstring'''
with open(__A, "r" ) as f:
UpperCAmelCase__ = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
UpperCAmelCase__ = split_code_in_indented_blocks(
__A, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:" )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1, len(__A ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
UpperCAmelCase__ = main_blocks[block_idx]
UpperCAmelCase__ = block.split("\n" )
# Get to the start of the imports.
UpperCAmelCase__ = 0
while line_idx < len(__A ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
UpperCAmelCase__ = len(__A )
else:
line_idx += 1
if line_idx >= len(__A ):
continue
# Ignore beginning and last line: they don't contain anything.
UpperCAmelCase__ = "\n".join(block_lines[line_idx:-1] )
UpperCAmelCase__ = get_indent(block_lines[1] )
# Slit the internal block into blocks of indent level 1.
UpperCAmelCase__ = split_code_in_indented_blocks(__A, indent_level=__A )
# We have two categories of import key: list or _import_structure[key].append/extend
UpperCAmelCase__ = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
UpperCAmelCase__ = [(pattern.search(__A ).groups()[0] if pattern.search(__A ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
UpperCAmelCase__ = [(i, key) for i, key in enumerate(__A ) if key is not None]
UpperCAmelCase__ = [x[0] for x in sorted(__A, key=lambda __A : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
UpperCAmelCase__ = 0
UpperCAmelCase__ = []
for i in range(len(__A ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
UpperCAmelCase__ = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(__A )
count += 1
# And we put our main block back together with its first and last line.
UpperCAmelCase__ = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(__A ):
if check_only:
return True
else:
print(f"""Overwriting {file}.""" )
with open(__A, "w" ) as f:
f.write("\n".join(__A ) )
def lowerCAmelCase_ ( __A=True ) -> List[str]:
'''simple docstring'''
UpperCAmelCase__ = []
for root, _, files in os.walk(__A ):
if "__init__.py" in files:
UpperCAmelCase__ = sort_imports(os.path.join(__A, "__init__.py" ), check_only=__A )
if result:
UpperCAmelCase__ = [os.path.join(__A, "__init__.py" )]
if len(__A ) > 0:
raise ValueError(f"""Would overwrite {len(__A )} files, run `make style`.""" )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
UpperCamelCase__ = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
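# Hedged programmatic example (the path is illustrative): dry-run the sorter on
# a single init file instead of walking the whole tree.
#
#   sort_imports("src/diffusers/__init__.py", check_only=True)
#
# returns True when the file would be rewritten, None otherwise.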
| 143
|
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class A :
def __init__(self : Tuple , __UpperCAmelCase : str , ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = parent
UpperCAmelCase__ = 1_3
UpperCAmelCase__ = 7
UpperCAmelCase__ = True
UpperCAmelCase__ = True
UpperCAmelCase__ = False
UpperCAmelCase__ = True
UpperCAmelCase__ = 9_9
UpperCAmelCase__ = 3_2
UpperCAmelCase__ = 2
UpperCAmelCase__ = 4
UpperCAmelCase__ = 3_7
UpperCAmelCase__ = "gelu"
UpperCAmelCase__ = 0.1
UpperCAmelCase__ = 0.1
UpperCAmelCase__ = 5_1_2
UpperCAmelCase__ = 1_6
UpperCAmelCase__ = 2
UpperCAmelCase__ = 0.02
UpperCAmelCase__ = 3
UpperCAmelCase__ = 4
UpperCAmelCase__ = None
def lowercase_ (self : int ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ = None
if self.use_input_mask:
UpperCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = None
if self.use_labels:
UpperCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase__ = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase_ (self : int , __UpperCAmelCase : int , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Tuple ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = TFDistilBertModel(config=__UpperCAmelCase )
UpperCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask}
UpperCAmelCase__ = model(__UpperCAmelCase )
UpperCAmelCase__ = [input_ids, input_mask]
UpperCAmelCase__ = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ (self : str , __UpperCAmelCase : int , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Dict ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = TFDistilBertForMaskedLM(config=__UpperCAmelCase )
UpperCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask}
UpperCAmelCase__ = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase_ (self : int , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : List[str] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = TFDistilBertForQuestionAnswering(config=__UpperCAmelCase )
UpperCAmelCase__ = {
"input_ids": input_ids,
"attention_mask": input_mask,
}
UpperCAmelCase__ = model(__UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase_ (self : Optional[int] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : List[str] ) -> str:
"""simple docstring"""
UpperCAmelCase__ = self.num_labels
UpperCAmelCase__ = TFDistilBertForSequenceClassification(__UpperCAmelCase )
UpperCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask}
UpperCAmelCase__ = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ (self : Optional[int] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] , __UpperCAmelCase : str , __UpperCAmelCase : Any ) -> int:
"""simple docstring"""
UpperCAmelCase__ = self.num_choices
UpperCAmelCase__ = TFDistilBertForMultipleChoice(__UpperCAmelCase )
UpperCAmelCase__ = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase__ = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase__ = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
}
UpperCAmelCase__ = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase_ (self : Dict , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : List[str] , __UpperCAmelCase : Any , __UpperCAmelCase : Optional[int] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = self.num_labels
UpperCAmelCase__ = TFDistilBertForTokenClassification(__UpperCAmelCase )
UpperCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask}
UpperCAmelCase__ = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase_ (self : Any ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = self.prepare_config_and_inputs()
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) = config_and_inputs
UpperCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class A ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
__UpperCAmelCase : Union[str, Any] = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
__UpperCAmelCase : Optional[int] = (
{
'feature-extraction': TFDistilBertModel,
'fill-mask': TFDistilBertForMaskedLM,
'question-answering': TFDistilBertForQuestionAnswering,
'text-classification': TFDistilBertForSequenceClassification,
'token-classification': TFDistilBertForTokenClassification,
'zero-shot': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : str = False
def lowercase_ (self : Union[str, Any] ) -> str:
"""simple docstring"""
UpperCAmelCase__ = TFDistilBertModelTester(self )
UpperCAmelCase__ = ConfigTester(self , config_class=__UpperCAmelCase , dim=3_7 )
def lowercase_ (self : Any ) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase_ (self : int ) -> int:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*__UpperCAmelCase )
def lowercase_ (self : Optional[int] ) -> int:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*__UpperCAmelCase )
def lowercase_ (self : Any ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*__UpperCAmelCase )
def lowercase_ (self : List[str] ) -> str:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*__UpperCAmelCase )
def lowercase_ (self : Dict ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*__UpperCAmelCase )
def lowercase_ (self : str ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*__UpperCAmelCase )
@slow
def lowercase_ (self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
UpperCAmelCase__ = TFDistilBertModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
@require_tf
class A ( unittest.TestCase ):
@slow
def lowercase_ (self : List[Any] ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = TFDistilBertModel.from_pretrained("distilbert-base-uncased" )
UpperCAmelCase__ = tf.constant([[0, 1, 2, 3, 4, 5]] )
UpperCAmelCase__ = model(__UpperCAmelCase )[0]
UpperCAmelCase__ = [1, 6, 7_6_8]
self.assertEqual(output.shape , __UpperCAmelCase )
UpperCAmelCase__ = tf.constant(
[
[
[0.19261885, -0.13732955, 0.4119799],
[0.22150156, -0.07422661, 0.39037204],
[0.22756018, -0.0896414, 0.3701467],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 )
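# The slice check above is a regression guard: instead of pinning all (1, 6, 768)
# hidden states, it pins a 3x3 corner of the output and tolerates 1e-4 of
# numerical drift across TF and hardware versions.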
| 143
| 1
|
def A ( _SCREAMING_SNAKE_CASE ) -> int:
    if _SCREAMING_SNAKE_CASE < 0:
        raise ValueError("Input value must be a positive integer" )
    elif isinstance(_SCREAMING_SNAKE_CASE , float ):
        raise TypeError("Input value must be a 'int' type" )
    return bin(_SCREAMING_SNAKE_CASE ).count("1" )
if __name__ == "__main__":
import doctest
doctest.testmod()
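# Worked example (illustrative values): bin(25) == '0b11001', so the function
# returns 3; bin(0) == '0b0', so it returns 0.
#
#   assert A(25) == 3
#   assert A(0) == 0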
| 48
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""microsoft/markuplm-base""": """https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json""",
"""microsoft/markuplm-large""": """https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json""",
}
class A__ ( _lowerCamelCase):
A_ : List[Any] = 'markuplm'
def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , bos_token_id=0 , eos_token_id=2 , max_xpath_tag_unit_embeddings=256 , max_xpath_subs_unit_embeddings=1_024 , tag_pad_id=216 , subs_pad_id=1_001 , xpath_unit_hidden_size=32 , max_depth=50 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
super().__init__(
pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.position_embedding_type = position_embedding_type
self.use_cache = use_cache
self.classifier_dropout = classifier_dropout
# additional properties
self.max_depth = max_depth
self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
self.tag_pad_id = tag_pad_id
self.subs_pad_id = subs_pad_id
self.xpath_unit_hidden_size = xpath_unit_hidden_size
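# The defaults above are base-sized: a plain instantiation is expected to mirror
# microsoft/markuplm-base (an assumption based on the archive map at the top of
# this file, not verified against the hosted config).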
| 86
| 0
|
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class SCREAMING_SNAKE_CASE__ ( TestCase ):
def _no_encoding_on_file_open(self : Dict , a__ : str ):
"""simple docstring"""
with open(a__ , encoding='''utf-8''' ) as input_file:
regexp = re.compile(R'''(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)''' )
input_text = input_file.read()
match = regexp.search(input_text )
return match
def _no_print_statements(self : Union[str, Any] , a__ : str ):
"""simple docstring"""
with open(a__ , encoding='''utf-8''' ) as input_file:
regexp = re.compile(R'''#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()''' , re.DOTALL )
input_text = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
matches = regexp.finditer(input_text )
matches = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def test_no_encoding_on_file_open(self : List[str] ):
"""simple docstring"""
dataset_paths = Path('''./datasets''' )
dataset_files = list(dataset_paths.absolute().glob('''**/*.py''' ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(dataset ) ):
raise AssertionError(f"""open(...) must use utf-8 encoding in {dataset}""" )
def test_no_print_statements(self : Tuple ):
"""simple docstring"""
dataset_paths = Path('''./datasets''' )
dataset_files = list(dataset_paths.absolute().glob('''**/*.py''' ) )
for dataset in dataset_files:
if self._no_print_statements(str(dataset ) ):
raise AssertionError(f"""print statement found in {dataset}. Use datasets.logger/logging instead.""" )
| 238
|
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def a (self : int ):
"""simple docstring"""
super().tearDown()
gc.collect()
def a (self : Dict ):
"""simple docstring"""
__snake_case , __snake_case = FlaxControlNetModel.from_pretrained(
'''lllyasviel/sd-controlnet-canny''' , from_pt=a__ , dtype=jnp.bfloataa )
__snake_case , __snake_case = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , controlnet=a__ , from_pt=a__ , dtype=jnp.bfloataa )
__snake_case = controlnet_params
__snake_case = '''bird'''
__snake_case = jax.device_count()
__snake_case = pipe.prepare_text_inputs([prompts] * num_samples )
__snake_case = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' )
__snake_case = pipe.prepare_image_inputs([canny_image] * num_samples )
__snake_case = jax.random.PRNGKey(0 )
__snake_case = jax.random.split(a__ , jax.device_count() )
__snake_case = replicate(a__ )
__snake_case = shard(a__ )
__snake_case = shard(a__ )
__snake_case = pipe(
prompt_ids=a__ , image=a__ , params=a__ , prng_seed=a__ , num_inference_steps=50 , jit=a__ , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
__snake_case = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__snake_case = images[0, 253:256, 253:256, -1]
__snake_case = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__snake_case = jnp.array(
[0.1_6_7_9_6_9, 0.1_1_6_6_9_9, 0.0_8_1_5_4_3, 0.1_5_4_2_9_7, 0.1_3_2_8_1_2, 0.1_0_8_8_8_7, 0.1_6_9_9_2_2, 0.1_6_9_9_2_2, 0.2_0_5_0_7_8] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def a (self : Dict ):
"""simple docstring"""
__snake_case , __snake_case = FlaxControlNetModel.from_pretrained(
'''lllyasviel/sd-controlnet-openpose''' , from_pt=a__ , dtype=jnp.bfloataa )
__snake_case , __snake_case = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , controlnet=a__ , from_pt=a__ , dtype=jnp.bfloataa )
__snake_case = controlnet_params
__snake_case = '''Chef in the kitchen'''
__snake_case = jax.device_count()
__snake_case = pipe.prepare_text_inputs([prompts] * num_samples )
__snake_case = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png''' )
__snake_case = pipe.prepare_image_inputs([pose_image] * num_samples )
__snake_case = jax.random.PRNGKey(0 )
__snake_case = jax.random.split(a__ , jax.device_count() )
__snake_case = replicate(a__ )
__snake_case = shard(a__ )
__snake_case = shard(a__ )
__snake_case = pipe(
prompt_ids=a__ , image=a__ , params=a__ , prng_seed=a__ , num_inference_steps=50 , jit=a__ , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
__snake_case = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__snake_case = images[0, 253:256, 253:256, -1]
__snake_case = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__snake_case = jnp.array(
[[0.2_7_1_4_8_4, 0.2_6_1_7_1_9, 0.2_7_5_3_9_1, 0.2_7_7_3_4_4, 0.2_7_9_2_9_7, 0.2_9_1_0_1_6, 0.2_9_4_9_2_2, 0.3_0_2_7_3_4, 0.3_0_2_7_3_4]] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
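# The replicate/shard pattern used in both tests is the standard pmap data
# layout: replicate() copies the params to every local device, shard() splits
# the batch along a new leading device axis, and jit=True makes the pipeline
# run one pmapped sampling step per TPU/GPU core.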
| 238
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_encodec''': [
'''ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EncodecConfig''',
],
'''feature_extraction_encodec''': ['''EncodecFeatureExtractor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_encodec"] = [
'''ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EncodecModel''',
'''EncodecPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
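# Sketch of what the _LazyModule wiring buys (the behavior lives in
# transformers.utils; this summarizes its __getattr__ contract as an assumption):
#
#   from transformers.models import encodec   # cheap: no torch import yet
#   encodec.EncodecModel                       # first attribute access triggers
#                                              # the real torch-backed import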
| 69
|
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_configure( config ):
config.addinivalue_line(
'''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' )
config.addinivalue_line(
'''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' )
config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' )
config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' )
config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' )
config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' )
def pytest_addoption( parser ):
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary( terminalreporter ):
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption('''--make-reports''' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
def pytest_sessionfinish( session , exitstatus ):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag('''IGNORE_RESULT''')
OutputChecker = doctest.OutputChecker
class CustomOutputChecker ( OutputChecker ):
'''simple docstring'''
def check_output( self , want , got , optionflags ) -> bool:
'''simple docstring'''
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , want , got , optionflags )
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
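# With the custom checker installed above, a doctest can opt out of output
# comparison via the registered flag (`some_call` is a hypothetical example):
#
#   >>> some_call()  # doctest: +IGNORE_RESULT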
| 319
| 0
|
"""simple docstring"""
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class SCREAMING_SNAKE_CASE__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=64 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=[1, 16, 4, 4] , _SCREAMING_SNAKE_CASE=None , ) -> Any:
'''simple docstring'''
UpperCAmelCase : Tuple = parent
UpperCAmelCase : Union[str, Any] = batch_size
UpperCAmelCase : Dict = image_size
UpperCAmelCase : Union[str, Any] = patch_size
UpperCAmelCase : Union[str, Any] = num_channels
UpperCAmelCase : List[Any] = is_training
UpperCAmelCase : str = use_labels
UpperCAmelCase : List[str] = hidden_size
UpperCAmelCase : Union[str, Any] = num_hidden_layers
UpperCAmelCase : Optional[Any] = num_attention_heads
UpperCAmelCase : Dict = intermediate_size
UpperCAmelCase : Any = hidden_act
UpperCAmelCase : List[Any] = hidden_dropout_prob
UpperCAmelCase : str = attention_probs_dropout_prob
UpperCAmelCase : Optional[Any] = type_sequence_label_size
UpperCAmelCase : Optional[int] = initializer_range
UpperCAmelCase : List[str] = scope
UpperCAmelCase : Optional[int] = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
UpperCAmelCase : Dict = (self.image_size // 32) ** 2
UpperCAmelCase : Optional[int] = num_patches + 1
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : int = None
if self.use_labels:
UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase : Tuple = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
UpperCAmelCase : int = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [4, 8, 16, 32],
"""num_groups""": 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=_SCREAMING_SNAKE_CASE , )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : int = ViTHybridModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase : Union[str, Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase : Any = self.type_sequence_label_size
UpperCAmelCase : List[Any] = ViTHybridForImageClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase : Dict = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
UpperCAmelCase : Union[str, Any] = config_and_inputs
UpperCAmelCase : Optional[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
__lowerCAmelCase : Tuple = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
__lowerCAmelCase : List[str] = (
{'feature-extraction': ViTHybridModel, 'image-classification': ViTHybridForImageClassification}
if is_torch_available()
else {}
)
__lowerCAmelCase : List[Any] = False
__lowerCAmelCase : Any = False
__lowerCAmelCase : Union[str, Any] = False
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : List[Any] = ViTHybridModelTester(self )
UpperCAmelCase : int = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Dict = model_class(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) )
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Optional[int] = model_class(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : str = [*signature.parameters.keys()]
UpperCAmelCase : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : int = _config_zero_init(_SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
UpperCAmelCase : Any = model_class(config=_SCREAMING_SNAKE_CASE )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
UpperCAmelCase : List[Any] = [F"{name}.{key}" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
@slow
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Tuple = ViTHybridModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def _snake_case ( ) -> Tuple:
UpperCAmelCase : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : Any = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
_SCREAMING_SNAKE_CASE )
UpperCAmelCase : int = self.default_image_processor
UpperCAmelCase : List[Any] = prepare_img()
UpperCAmelCase : Any = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(_SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
UpperCAmelCase : Any = model(**_SCREAMING_SNAKE_CASE )
# verify the logits
UpperCAmelCase : List[str] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[str] = torch.tensor([-1.9090, -0.4993, -0.2389] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
@slow
@require_accelerate
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
UpperCAmelCase : List[Any] = ViTHybridImageProcessor.from_pretrained("""google/vit-hybrid-base-bit-384""" )
UpperCAmelCase : List[Any] = ViTHybridForImageClassification.from_pretrained("""google/vit-hybrid-base-bit-384""" , device_map="""auto""" )
UpperCAmelCase : List[str] = prepare_img()
UpperCAmelCase : Optional[int] = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
UpperCAmelCase : Any = model(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase : str = outputs.logits
# model predicts one of the 1000 ImageNet classes
UpperCAmelCase : List[Any] = logits.argmax(-1 ).item()
self.assertEqual(model.config.id2label[predicted_class_idx] , """tabby, tabby cat""" )
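# device_map="auto" relies on accelerate to place the checkpoint's weights
# across the available GPUs/CPU RAM at load time, which is why this test is
# gated behind @require_accelerate.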
| 366
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
A: int = logging.get_logger(__name__)
A: Any = {"vocab_file": "vocab.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
"YituTech/conv-bert-medium-small": (
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
),
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"YituTech/conv-bert-base": 5_1_2,
"YituTech/conv-bert-medium-small": 5_1_2,
"YituTech/conv-bert-small": 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
"YituTech/conv-bert-base": {"do_lower_case": True},
"YituTech/conv-bert-medium-small": {"do_lower_case": True},
"YituTech/conv-bert-small": {"do_lower_case": True},
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
__lowerCAmelCase : List[Any] = VOCAB_FILES_NAMES
__lowerCAmelCase : Any = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : List[str] = PRETRAINED_INIT_CONFIGURATION
__lowerCAmelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : int = ConvBertTokenizer
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE="[UNK]" , _SCREAMING_SNAKE_CASE="[SEP]" , _SCREAMING_SNAKE_CASE="[PAD]" , _SCREAMING_SNAKE_CASE="[CLS]" , _SCREAMING_SNAKE_CASE="[MASK]" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(
_SCREAMING_SNAKE_CASE , tokenizer_file=_SCREAMING_SNAKE_CASE , do_lower_case=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , tokenize_chinese_chars=_SCREAMING_SNAKE_CASE , strip_accents=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
UpperCAmelCase : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , _SCREAMING_SNAKE_CASE ) != do_lower_case
or normalizer_state.get("""strip_accents""" , _SCREAMING_SNAKE_CASE ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , _SCREAMING_SNAKE_CASE ) != tokenize_chinese_chars
):
UpperCAmelCase : Dict = getattr(_SCREAMING_SNAKE_CASE , normalizer_state.pop("""type""" ) )
UpperCAmelCase : str = do_lower_case
UpperCAmelCase : Optional[int] = strip_accents
UpperCAmelCase : List[str] = tokenize_chinese_chars
UpperCAmelCase : Dict = normalizer_class(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase : int = do_lower_case
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]:
'''simple docstring'''
UpperCAmelCase : str = [self.sep_token_id]
UpperCAmelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
'''simple docstring'''
UpperCAmelCase : Dict = self._tokenizer.model.save(_SCREAMING_SNAKE_CASE , name=_SCREAMING_SNAKE_CASE )
return tuple(_SCREAMING_SNAKE_CASE )
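# For reference, the BERT-style convention the two sequence-pair helpers above
# implement:
#   build_inputs_with_special_tokens      -> [CLS] A [SEP] B [SEP]
#   create_token_type_ids_from_sequences -> 0s over "[CLS] A [SEP]", 1s over "B [SEP]"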
| 76
| 0
|
"""simple docstring"""
from __future__ import annotations
import queue
class TreeNode :
def __init__( self , data ):
'''simple docstring'''
self.data = data
self.left = None
self.right = None
def build_tree( ) -> TreeNode:
print('\n********Press N to stop entering at any point of time********\n' )
check = input('Enter the value of the root node: ' ).strip().lower()
q = queue.Queue()
tree_node = TreeNode(int(check ) )
q.put(tree_node )
while not q.empty():
node_found = q.get()
msg = f'Enter the left node of {node_found.data}: '
check = input(msg ).strip().lower() or 'n'
if check == "n":
return tree_node
left_node = TreeNode(int(check ) )
node_found.left = left_node
q.put(left_node )
msg = f'Enter the right node of {node_found.data}: '
check = input(msg ).strip().lower() or 'n'
if check == "n":
return tree_node
right_node = TreeNode(int(check ) )
node_found.right = right_node
q.put(right_node )
raise
def pre_order( node :TreeNode ) -> None:
if not isinstance(node , TreeNode ) or not node:
return
print(node.data , end=',' )
pre_order(node.left )
pre_order(node.right )
def in_order( node :TreeNode ) -> None:
if not isinstance(node , TreeNode ) or not node:
return
in_order(node.left )
print(node.data , end=',' )
in_order(node.right )
def post_order( node :TreeNode ) -> None:
if not isinstance(node , TreeNode ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=',' )
def level_order( node :TreeNode ) -> None:
if not isinstance(node , TreeNode ) or not node:
return
q = queue.Queue()
q.put(node )
while not q.empty():
node_dequeued = q.get()
print(node_dequeued.data , end=',' )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def level_order_actual( node :TreeNode ) -> None:
if not isinstance(node , TreeNode ) or not node:
return
q = queue.Queue()
q.put(node )
while not q.empty():
list_ = []
while not q.empty():
node_dequeued = q.get()
print(node_dequeued.data , end=',' )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(node )
def pre_order_iter( node :TreeNode ) -> None:
if not isinstance(node , TreeNode ) or not node:
return
stack = []
n = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=',' )
stack.append(n )
n = n.left
# end of while means current node doesn't have left child
n = stack.pop()
# start to traverse its right child
n = n.right
def in_order_iter( node :TreeNode ) -> None:
if not isinstance(node , TreeNode ) or not node:
return
stack = []
n = node
while n or stack:
while n:
stack.append(n )
n = n.left
n = stack.pop()
print(n.data , end=',' )
n = n.right
def post_order_iter( node :TreeNode ) -> None:
if not isinstance(node , TreeNode ) or not node:
return
stacka , stackb = [], []
n = node
stacka.append(n )
while stacka: # to find the reversed order of post order, store it in the second stack
n = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stackb.append(n )
while stackb: # popping from the second stack yields the post order
print(stackb.pop().data , end=',' )
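# Two-stack trick: the first stack pops nodes in root -> right -> left order;
# pushing each popped node onto the second stack and then emptying it reverses
# that into left -> right -> root, i.e. post-order.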
def A ( snake_case :str = "" , snake_case :str=5_0 , snake_case :Any="*" ) -> str:
if not s:
return "\n" + width * char
__UpperCamelCase , __UpperCamelCase = divmod(width - len(snake_case ) - 2 , 2 )
return f'{left * char} {s} {(left + extra) * char}'
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("Binary Tree Traversals"))
node : TreeNode = build_tree()
print(prompt("Pre Order Traversal"))
pre_order(node)
print(prompt() + "\n")
print(prompt("In Order Traversal"))
in_order(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal"))
post_order(node)
print(prompt() + "\n")
print(prompt("Level Order Traversal"))
level_order(node)
print(prompt() + "\n")
print(prompt("Actual Level Order Traversal"))
level_order_actual(node)
print("*" * 5_0 + "\n")
print(prompt("Pre Order Traversal - Iteration Version"))
pre_order_iter(node)
print(prompt() + "\n")
print(prompt("In Order Traversal - Iteration Version"))
in_order_iter(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal - Iteration Version"))
post_order_iter(node)
print(prompt())
| 316
|
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCamelCase : Any = logging.get_logger(__name__)
UpperCamelCase : Any = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
},
"merges_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
},
"tokenizer_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"gpt2": 1_0_2_4,
"gpt2-medium": 1_0_2_4,
"gpt2-large": 1_0_2_4,
"gpt2-xl": 1_0_2_4,
"distilgpt2": 1_0_2_4,
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ["input_ids", "attention_mask"]
lowercase = GPTaTokenizer
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase=False , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(
__UpperCAmelCase , __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , unk_token=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , **__UpperCAmelCase , )
__UpperCamelCase = kwargs.pop('add_bos_token' , __UpperCAmelCase )
__UpperCamelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , __UpperCAmelCase ) != add_prefix_space:
__UpperCamelCase = getattr(__UpperCAmelCase , pre_tok_state.pop('type' ) )
__UpperCamelCase = add_prefix_space
__UpperCamelCase = pre_tok_class(**__UpperCAmelCase )
__UpperCamelCase = add_prefix_space
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = kwargs.get('is_split_into_words' , __UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = kwargs.get('is_split_into_words' , __UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
__UpperCamelCase = self._tokenizer.model.save(__UpperCAmelCase , name=__UpperCAmelCase )
return tuple(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) + [self.eos_token_id] )
if len(__UpperCAmelCase ) > self.model_max_length:
__UpperCamelCase = input_ids[-self.model_max_length :]
return input_ids
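# GPT-2's byte-level BPE is whitespace-sensitive ("hello" and " hello" tokenize
# differently), which is why the two *encode_plus hooks above refuse
# pretokenized input unless add_prefix_space=True was set at construction.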
| 316
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE :Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE :List[Any] = {
"""microsoft/markuplm-base""": """https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json""",
"""microsoft/markuplm-large""": """https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json""",
}
class __magic_name__ ( snake_case ):
UpperCamelCase_ :Union[str, Any] = """markuplm"""
def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , bos_token_id=0 , eos_token_id=2 , max_xpath_tag_unit_embeddings=256 , max_xpath_subs_unit_embeddings=1_024 , tag_pad_id=216 , subs_pad_id=1_001 , xpath_unit_hidden_size=32 , max_depth=50 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , )-> int:
super().__init__(
pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.position_embedding_type = position_embedding_type
self.use_cache = use_cache
self.classifier_dropout = classifier_dropout
# additional properties
self.max_depth = max_depth
self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
self.tag_pad_id = tag_pad_id
self.subs_pad_id = subs_pad_id
self.xpath_unit_hidden_size = xpath_unit_hidden_size
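# The xpath-specific fields (max_depth, tag_pad_id, subs_pad_id,
# xpath_unit_hidden_size, and the two max_xpath_* sizes) parameterize the extra
# embeddings MarkupLM learns for each DOM node's XPath, on top of the usual
# BERT-style text embeddings.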
| 60
|
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __magic_name__ ( unittest.TestCase ):
def __init__( self , _lowercase , _lowercase=7 , _lowercase=3 , _lowercase=18 , _lowercase=30 , _lowercase=400 , _lowercase=True , _lowercase=None , _lowercase=True , )-> Optional[int]:
UpperCamelCase_ = size if size is not None else {"height": 18, "width": 18}
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = num_channels
UpperCamelCase_ = image_size
UpperCamelCase_ = min_resolution
UpperCamelCase_ = max_resolution
UpperCamelCase_ = do_resize
UpperCamelCase_ = size
UpperCamelCase_ = apply_ocr
def UpperCAmelCase_ ( self )-> str:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class __magic_name__ ( snake_case , unittest.TestCase ):
UpperCamelCase_ :Union[str, Any] = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def UpperCAmelCase_ ( self )-> Any:
UpperCamelCase_ = LayoutLMvaImageProcessingTester(self )
@property
def UpperCAmelCase_ ( self )-> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self )-> Dict:
UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowercase , "do_resize" ) )
self.assertTrue(hasattr(_lowercase , "size" ) )
self.assertTrue(hasattr(_lowercase , "apply_ocr" ) )
def UpperCAmelCase_ ( self )-> List[Any]:
UpperCamelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
UpperCamelCase_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
def UpperCAmelCase_ ( self )-> Any:
pass
def UpperCAmelCase_ ( self )-> List[str]:
# Initialize image_processing
UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image )
# Test not batched input
UpperCamelCase_ = image_processing(image_inputs[0] , return_tensors="pt" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
self.assertIsInstance(encoding.words , _lowercase )
self.assertIsInstance(encoding.boxes , _lowercase )
# Test batched
UpperCamelCase_ = image_processing(_lowercase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def UpperCAmelCase_ ( self )-> str:
# Initialize image_processing
UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray )
# Test not batched input
UpperCamelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
UpperCamelCase_ = image_processing(_lowercase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def UpperCAmelCase_ ( self )-> List[str]:
# Initialize image_processing
UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor )
# Test not batched input
UpperCamelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
UpperCamelCase_ = image_processing(_lowercase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def UpperCAmelCase_ ( self )-> Any:
# with apply_OCR = True
UpperCamelCase_ = LayoutLMvaImageProcessor()
from datasets import load_dataset
UpperCamelCase_ = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" )
UpperCamelCase_ = Image.open(ds[0]["file"] ).convert("RGB" )
UpperCamelCase_ = image_processing(_lowercase , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
UpperCamelCase_ = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
UpperCamelCase_ = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , _lowercase )
self.assertListEqual(encoding.boxes , _lowercase )
        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)
        encoding = image_processing(image, return_tensors="pt")
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
| 60
| 1
|
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
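# A small, hedged sanity check for rename_keys; the sample keys below are
# made-up fairseq-style names used only to illustrate the substitution order:
assert rename_keys("transformer.layers.0.linear1.weight") == "model.decoder.layers.0.fc1.weight"
assert rename_keys("emb.weight") == "model.decoder.embed_tokens.weight"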
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj into separate query/key/value projections
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)
    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )
    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)
    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)
    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")
    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")
    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)
    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)
    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits
    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")
    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048
    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0
    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)
    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint''',
default='''small''',
type=str,
help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''',
)
parser.add_argument(
'''--pytorch_dump_folder''',
required=True,
default=None,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
parser.add_argument(
'''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.'''
)
    args = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, device=args.device)
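# A hedged usage sketch for the converter above; the output directory name is a
# hypothetical example, and the flags mirror this script's argparse setup:
#
#   python convert_musicgen_transformers.py --checkpoint small --pytorch_dump_folder ./musicgen-small
#
# A converted checkpoint can then be reloaded with the standard transformers API
# (download-free, since it reads the local dump folder):
#
#   from transformers import MusicgenForConditionalGeneration, MusicgenProcessor
#   model = MusicgenForConditionalGeneration.from_pretrained("./musicgen-small")
#   processor = MusicgenProcessor.from_pretrained("./musicgen-small")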
| 143
|
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
SORTED_HANDS = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
TEST_FLUSH = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
TEST_STRAIGHT = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 14]),
('''2H 5D 3C AS 5S''', False, [14, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [14, 13, 12, 11, 10]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
TEST_TYPES = (
('''JH AH TH KH QH''', 23),
('''JH 9H TH KH QH''', 22),
('''JC KH JS JD JH''', 21),
('''KH KC 3S 3H 3D''', 20),
('''8C 9C 5C 3C TC''', 19),
('''JS QS 9H TS KH''', 18),
('''7C 7S KH 2H 7H''', 17),
('''3C KH 5D 5S KH''', 16),
('''QH 8H KD JH 8S''', 15),
('''2D 6D 9D TH 7D''', 14),
)
def generate_random_hand():
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected
def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))
@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected
@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected
@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values
@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected
@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected
@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected
@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected
def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]
def test_custom_sort_five_high_straight():
    # Test that five high straights are compared correctly.
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def test_multiple_calls_five_high_straight():
    # Multiple calls to five_high_straight function should still return True
    # and shouldn't mutate the list in every call other than the first.
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values
def test_euler_project():
    # Problem number 54 from Project Euler
    # Testing from poker_hands.txt file
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
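# A minimal, hedged usage sketch of the PokerHand API exercised by the tests
# above; the two hands are arbitrary examples taken from SORTED_HANDS:
def demo_compare() -> None:
    flush = PokerHand("JH 8H AH KH QH")
    pair = PokerHand("KS 8D 4D 9S 4S")
    assert flush.compare_with(pair) == "Win"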
| 143
| 1
|
"""simple docstring"""
import operator as op
_UpperCamelCase = """scaler.pt"""
_UpperCamelCase = """pytorch_model"""
_UpperCamelCase = """random_states"""
_UpperCamelCase = """optimizer"""
_UpperCamelCase = """scheduler"""
_UpperCamelCase = """pytorch_model.bin"""
_UpperCamelCase = """pytorch_model.bin.index.json"""
_UpperCamelCase = """model.safetensors"""
_UpperCamelCase = """model.safetensors.index.json"""
_UpperCamelCase = """1.10.2"""
_UpperCamelCase = """py38"""
_UpperCamelCase = """4.17.0"""
_UpperCamelCase = ["""ml.p3.16xlarge""", """ml.p3dn.24xlarge""", """ml.p4dn.24xlarge"""]
_UpperCamelCase = ["""FULL_SHARD""", """SHARD_GRAD_OP""", """NO_SHARD""", """HYBRID_SHARD""", """HYBRID_SHARD_ZERO2"""]
_UpperCamelCase = ["""TRANSFORMER_BASED_WRAP""", """SIZE_BASED_WRAP""", """NO_WRAP"""]
_UpperCamelCase = ["""BACKWARD_PRE""", """BACKWARD_POST""", """NO_PREFETCH"""]
_UpperCamelCase = ["""FULL_STATE_DICT""", """LOCAL_STATE_DICT""", """SHARDED_STATE_DICT"""]
_UpperCamelCase = """2.0.1"""
_UpperCamelCase = ["""pdsh""", """standard""", """openmpi""", """mvapich"""]
_UpperCamelCase = ["""default""", """reduce-overhead""", """max-autotune"""]
_UpperCamelCase = {""">""": op.gt, """>=""": op.ge, """==""": op.eq, """!=""": op.ne, """<=""": op.le, """<""": op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
_UpperCamelCase = [
"""nnodes""",
"""nproc_per_node""",
"""rdzv_backend""",
"""rdzv_endpoint""",
"""rdzv_id""",
"""rdzv_conf""",
"""standalone""",
"""max_restarts""",
"""monitor_interval""",
"""start_method""",
"""role""",
"""module""",
"""m""",
"""no_python""",
"""run_path""",
"""log_dir""",
"""r""",
"""redirects""",
"""t""",
"""tee""",
"""node_rank""",
"""master_addr""",
"""master_port""",
]
_UpperCamelCase = ["""DEEPSPEED""", """MULTI_GPU""", """FSDP""", """MEGATRON_LM"""]
_UpperCamelCase = ["""DEEPSPEED""", """MULTI_XPU""", """FSDP"""]
| 351
|
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
"""text_branch""": """text_model""",
"""audio_branch""": """audio_model.audio_encoder""",
"""attn""": """attention.self""",
"""self.proj""": """output.dense""",
"""attention.self_mask""": """attn_mask""",
"""mlp.fc1""": """intermediate.dense""",
"""mlp.fc2""": """output.dense""",
"""norm1""": """layernorm_before""",
"""norm2""": """layernorm_after""",
"""bn0""": """batch_norm""",
}
processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg
def rename_state_dict(state_dict):
    model_state_dict = {}
    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")
        if "audio" in key and "qkv" in key:
            # split fused qkv into separate query, key and value projections
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)
    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)
    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
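# A hedged, self-contained illustration of the fused-QKV split performed in
# rename_state_dict above; the hidden size of 8 and the random tensor are
# arbitrary toy values:
fused = torch.randn(3 * 8, 8)
qkv_dim = fused.size(0) // 3
query, key, value = fused[:qkv_dim], fused[qkv_dim : 2 * qkv_dim], fused[-qkv_dim:]
assert query.shape == key.shape == value.shape == (8, 8)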
| 234
| 0
|
"""simple docstring"""
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard
def test_mockfs(mockfs):
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_non_mockfs():
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_extract_path_from_uri():
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False
    new_dataset_path = "./local/path"
    dataset_path = extract_path_from_uri(new_dataset_path)
    assert dataset_path == new_dataset_path
def test_is_remote_filesystem(mockfs):
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True
    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False
@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        reason = f"for '{compression_fs_class.protocol}' compression protocol, "
        if compression_fs_class.protocol == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex(".")]
    assert fs.glob("*") == [expected_filename]
    with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
        assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = f"{protocol}://{member_file_path}::{compressed_file_path}"
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile("non_existing_" + member_file_path)
@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()
def test_fs_overwrites():
    protocol = "bz2"
    # Import module
    import datasets.filesystems
    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)
    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
| 238
|
"""simple docstring"""
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """
        Initialize with a list giving the size of each disjoint set.
        """
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))
    def merge(self, src: int, dst: int) -> bool:
        """
        Merge the sets containing src and dst; returns False if already merged.
        """
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True
    def get_parent(self, disj_set: int) -> int:
        """
        Find the root of disj_set, compressing the path along the way.
        """
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
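# A brief usage sketch of DisjointSet: four singleton sets, three merges, and
# the running maximum set size; the values are arbitrary:
ds = DisjointSet([1, 1, 1, 1])
ds.merge(0, 1)
ds.merge(2, 3)
ds.merge(1, 3)
assert ds.max_set == 4
assert ds.get_parent(0) == ds.get_parent(2)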
| 238
| 1
|
'''simple docstring'''
def reverse_words(input_str: str) -> str:
    """
    >>> reverse_words("I love Python")
    'Python love I'
    """
    return " ".join(input_str.split()[::-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
| 135
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase :Any = logging.get_logger(__name__)
lowerCamelCase :List[Any] = {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
'''umberto-commoncrawl-cased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
),
'''umberto-wikipedia-uncased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
),
}
class CamembertConfig(PretrainedConfig):
    model_type = "camembert"
    def __init__(
        self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute",
        use_cache=True, classifier_dropout=None, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
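# A hedged construction example for the config above; the values shown are the
# defaults, so this is equivalent to CamembertConfig():
config = CamembertConfig(vocab_size=30522, hidden_size=768, num_hidden_layers=12)
assert config.model_type == "camembert"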
| 135
| 1
|
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_tf2_weights_in_bert(model, tf_checkpoint_path, config):
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    layer_depth = []
    for full_name, shape in init_vars:
        # logger.info(f"Loading TF weight {name} with shape {shape}")
        name = full_name.split("/")
        if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
            logger.info(f"Skipping non-model layer {full_name}")
            continue
        if "optimizer" in full_name:
            logger.info(f"Skipping optimization layer {full_name}")
            continue
        if name[0] == "model":
            # ignore initial 'model'
            name = name[1:]
        # figure out how many levels deep the name is
        depth = 0
        for _name in name:
            if _name.startswith("layer_with_weights"):
                depth += 1
            else:
                break
        layer_depth.append(depth)
        # read data
        array = tf.train.load_variable(tf_path, full_name)
        names.append("/".join(name))
        arrays.append(array)
    logger.info(f"Read a total of {len(arrays):,} layers")
    # Sanity check
    if len(set(layer_depth)) != 1:
        raise ValueError(f"Found layer names with different depths (layer depth {list(set(layer_depth))})")
    layer_depth = list(set(layer_depth))[0]
    if layer_depth != 1:
        raise ValueError(
            "The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"
            " heads."
        )
    # convert layers
    logger.info("Converting weights...")
    for full_name, array in zip(names, arrays):
        name = full_name.split("/")
        pointer = model
        trace = []
        for i, m_name in enumerate(name):
            if m_name == ".ATTRIBUTES":
                # variable names end with .ATTRIBUTES/VARIABLE_VALUE
                break
            if m_name.startswith("layer_with_weights"):
                layer_num = int(m_name.split("-")[-1])
                if layer_num <= 2:
                    # embedding layers
                    # layer_num 0: word_embeddings
                    # layer_num 1: position_embeddings
                    # layer_num 2: token_type_embeddings
                    continue
                elif layer_num == 3:
                    # embedding LayerNorm
                    trace.extend(["embeddings", "LayerNorm"])
                    pointer = getattr(pointer, "embeddings")
                    pointer = getattr(pointer, "LayerNorm")
                elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
                    # encoder layers
                    trace.extend(["encoder", "layer", str(layer_num - 4)])
                    pointer = getattr(pointer, "encoder")
                    pointer = getattr(pointer, "layer")
                    pointer = pointer[layer_num - 4]
                elif layer_num == config.num_hidden_layers + 4:
                    # pooler layer
                    trace.extend(["pooler", "dense"])
                    pointer = getattr(pointer, "pooler")
                    pointer = getattr(pointer, "dense")
            elif m_name == "embeddings":
                trace.append("embeddings")
                pointer = getattr(pointer, "embeddings")
                if layer_num == 0:
                    trace.append("word_embeddings")
                    pointer = getattr(pointer, "word_embeddings")
                elif layer_num == 1:
                    trace.append("position_embeddings")
                    pointer = getattr(pointer, "position_embeddings")
                elif layer_num == 2:
                    trace.append("token_type_embeddings")
                    pointer = getattr(pointer, "token_type_embeddings")
                else:
                    raise ValueError(f"Unknown embedding layer with name {full_name}")
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            elif m_name == "_attention_layer":
                # self-attention layer
                trace.extend(["attention", "self"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "self")
            elif m_name == "_attention_layer_norm":
                # output attention norm
                trace.extend(["attention", "output", "LayerNorm"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_attention_output_dense":
                # output attention dense
                trace.extend(["attention", "output", "dense"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_dense":
                # output dense
                trace.extend(["output", "dense"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_layer_norm":
                # output layer norm
                trace.extend(["output", "LayerNorm"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_key_dense":
                # attention key
                trace.append("key")
                pointer = getattr(pointer, "key")
            elif m_name == "_query_dense":
                # attention query
                trace.append("query")
                pointer = getattr(pointer, "query")
            elif m_name == "_value_dense":
                # attention value
                trace.append("value")
                pointer = getattr(pointer, "value")
            elif m_name == "_intermediate_dense":
                # attention intermediate dense
                trace.extend(["intermediate", "dense"])
                pointer = getattr(pointer, "intermediate")
                pointer = getattr(pointer, "dense")
            # weights & biases
            elif m_name in ["bias", "beta"]:
                trace.append("bias")
                pointer = getattr(pointer, "bias")
            elif m_name in ["kernel", "gamma"]:
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            else:
                logger.warning(f"Ignored {m_name}")
        # for certain layers reshape is necessary
        trace = ".".join(trace)
        if re.match(r"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)", trace) or re.match(
            r"(\S+)\.attention\.output\.dense\.weight", trace
        ):
            array = array.reshape(pointer.data.shape)
        if "kernel" in full_name:
            array = array.transpose()
        if pointer.shape == array.shape:
            pointer.data = torch.from_numpy(array)
        else:
            raise ValueError(
                f"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"
                f" {array.shape}"
            )
        logger.info(f"Successfully set variable {full_name} to PyTorch layer {trace}")
    return model
def convert_tf2_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    # Instantiate model
    logger.info(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertModel(config)
    # Load weights from checkpoint
    logger.info(f"Loading weights from checkpoint {tf_checkpoint_path}...")
    load_tf2_weights_in_bert(model, tf_checkpoint_path, config)
    # Save pytorch-model
    logger.info(f"Saving PyTorch model to {pytorch_dump_path}...")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--tf_checkpoint_path''', type=str, required=True, help='''Path to the TensorFlow 2.x checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
type=str,
required=True,
help='''The config json file corresponding to the BERT model. This specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''',
type=str,
required=True,
help='''Path to the output PyTorch model (must include filename).''',
)
    args = parser.parse_args()
    convert_tf2_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
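# A hedged invocation sketch for the converter above; all paths are
# placeholders, not files shipped with this script:
#
#   python convert_bert_tf2_checkpoint.py \
#       --tf_checkpoint_path ./tf2_bert/bert_checkpoint \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin
#
# The resulting weights can then be reloaded into a fresh BertModel:
#
#   config = BertConfig.from_json_file("./bert_config.json")
#   model = BertModel(config)
#   model.load_state_dict(torch.load("./pytorch_model.bin"))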
| 143
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"
    def __init__(
        self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
        hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02,
        layer_norm_eps=1e-06, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
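# A hedged usage sketch: the number of patches per image follows directly from
# image_size and patch_size, which the assertion below illustrates:
config = ViTMSNConfig(image_size=224, patch_size=16)
num_patches = (config.image_size // config.patch_size) ** 2
assert num_patches == 196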
| 76
| 0
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_lengths=True,
        use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False,
        causal=False, asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32,
        num_hidden_layers=5, num_attention_heads=4, hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_sequence_label_size=2,
        initializer_range=0.02, num_labels=2, num_choices=4, summary_type="last", use_proj=True,
        scope=None, bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
def _A (self ):
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase= random_attention_mask([self.batch_size, self.seq_length] )
__lowercase= None
if self.use_input_lengths:
__lowercase= (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__lowercase= None
if self.use_token_type_ids:
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__lowercase= None
__lowercase= None
__lowercase= None
if self.use_labels:
__lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase= ids_tensor([self.batch_size] , 2 ).float()
__lowercase= ids_tensor([self.batch_size] , self.num_choices )
__lowercase= self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _A (self ):
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , lengths=lowerCAmelCase , langs=lowerCAmelCase )
__lowercase= model(lowerCAmelCase , langs=lowerCAmelCase )
__lowercase= model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMWithLMHeadModel(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMForQuestionAnsweringSimple(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= model(lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase )
__lowercase= outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMForQuestionAnswering(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= model(
lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , cls_index=lowerCAmelCase , is_impossible=lowerCAmelCase , p_mask=lowerCAmelCase , )
__lowercase= model(
lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , cls_index=lowerCAmelCase , is_impossible=lowerCAmelCase , )
((__lowercase ), )= result_with_labels.to_tuple()
__lowercase= model(lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase )
((__lowercase ), )= result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMForSequenceClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= self.num_labels
__lowercase= XLMForTokenClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= self.num_choices
__lowercase= XLMForMultipleChoice(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= model(
lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _A (self ):
__lowercase= self.prepare_config_and_inputs()
(
(
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
),
)= config_and_inputs
__lowercase= {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": XLMModel,
            "fill-mask": XLMWithLMHeadModel,
            "question-answering": XLMForQuestionAnsweringSimple,
            "text-classification": XLMForSequenceClassification,
            "text-generation": XLMWithLMHeadModel,
            "token-classification": XLMForTokenClassification,
            "zero-shot": XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ):
__lowercase= super()._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
__lowercase= torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase )
__lowercase= torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase )
return inputs_dict
    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)
def _A (self ):
self.config_tester.run_common_tests()
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=1 ):
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
self.assertListEqual(
[isinstance(lowerCAmelCase , lowerCAmelCase ) for iter_attentions in attentions] , [True] * len(lowerCAmelCase ) )
self.assertEqual(len(lowerCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(lowerCAmelCase ):
# adds PAD dummy token
__lowercase= min_length + idx + 1
__lowercase= min_length + idx + 1
__lowercase= (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(lowerCAmelCase ) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=1 ):
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
self.assertListEqual(
[isinstance(lowerCAmelCase , lowerCAmelCase ) for iter_hidden_states in hidden_states] , [True] * len(lowerCAmelCase ) , )
self.assertEqual(len(lowerCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(lowerCAmelCase ):
# adds PAD dummy token
__lowercase= min_length + idx + 1
__lowercase= (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(lowerCAmelCase ) , )
pass
@slow
def _A (self ):
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase= XLMModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
@slow
def _A (self ):
__lowercase= XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
model.to(lowerCAmelCase )
__lowercase= torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=lowerCAmelCase ) # the president
__lowercase= [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
__lowercase= model.generate(lowerCAmelCase , do_sample=lowerCAmelCase )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , lowerCAmelCase )
| 356
|
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
headers = {"UserAgent": UserAgent().random}
def extract_user_profile(script) -> dict:
    """
    May raise json.decoder.JSONDecodeError when the profile payload is absent.
    """
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()
    def get_json(self) -> dict:
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])
    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"
    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"
    @property
    def username(self) -> str:
        return self.user_data["username"]
    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]
    @property
    def biography(self) -> str:
        return self.user_data["biography"]
    @property
    def email(self) -> str:
        return self.user_data["business_email"]
    @property
    def website(self) -> str:
        return self.user_data["external_url"]
    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]
    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]
    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]
    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]
    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]
    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def test_instagram_user(username: str = "github") -> None:
    import os
    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_5_0
assert instagram_user.number_of_followers > 1_2_0_0_0_0
assert instagram_user.number_of_followings > 1_5
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser("github")
print(instagram_user)
print(F'{instagram_user.number_of_posts = }')
print(F'{instagram_user.number_of_followers = }')
print(F'{instagram_user.number_of_followings = }')
print(F'{instagram_user.email = }')
print(F'{instagram_user.website = }')
print(F'{instagram_user.profile_picture_url = }')
print(F'{instagram_user.is_verified = }')
print(F'{instagram_user.is_private = }')
| 304
| 0
|
"""simple docstring"""
from manim import *
class Stage1(Scene):
    def construct(self):
lowerCAmelCase : List[str] = Rectangle(height=0.5 , width=0.5 )
lowerCAmelCase : Union[str, Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowerCAmelCase : str = [mem.copy() for i in range(6 )]
lowerCAmelCase : str = [mem.copy() for i in range(6 )]
lowerCAmelCase : List[str] = VGroup(*UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0 )
lowerCAmelCase : Optional[Any] = VGroup(*UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0 )
lowerCAmelCase : Optional[Any] = VGroup(UpperCamelCase_ , UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0 )
lowerCAmelCase : Optional[int] = Text('''CPU''' , font_size=2_4 )
lowerCAmelCase : List[str] = Group(UpperCamelCase_ , UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0.5 , aligned_edge=UpperCamelCase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = [mem.copy() for i in range(1 )]
lowerCAmelCase : Any = VGroup(*UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0 )
lowerCAmelCase : str = Text('''GPU''' , font_size=2_4 )
lowerCAmelCase : int = Group(UpperCamelCase_ , UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0.5 , aligned_edge=UpperCamelCase_ )
gpu.align_to(UpperCamelCase_ , UpperCamelCase_ )
gpu.set_x(gpu.get_x() - 1 )
self.add(UpperCamelCase_ )
lowerCAmelCase : Optional[int] = [mem.copy() for i in range(6 )]
lowerCAmelCase : Union[str, Any] = VGroup(*UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0 )
lowerCAmelCase : List[Any] = Text('''Model''' , font_size=2_4 )
lowerCAmelCase : Any = Group(UpperCamelCase_ , UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0.5 , aligned_edge=UpperCamelCase_ )
model.move_to([3, -1.0, 0] )
self.play(
Create(UpperCamelCase_ , run_time=1 ) , Create(UpperCamelCase_ , run_time=1 ) , Create(UpperCamelCase_ , run_time=1 ) , )
lowerCAmelCase : Union[str, Any] = MarkupText(
F'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' , font_size=2_4 , )
lowerCAmelCase : Optional[int] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCAmelCase : Optional[Any] = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCamelCase_ , run_time=2.5 ) , Write(UpperCamelCase_ ) , Write(UpperCamelCase_ ) )
self.add(UpperCamelCase_ )
lowerCAmelCase : Dict = []
lowerCAmelCase : str = []
lowerCAmelCase : List[str] = []
for i, rect in enumerate(UpperCamelCase_ ):
lowerCAmelCase : Optional[int] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(UpperCamelCase_ , opacity=0.7 )
cpu_target.move_to(UpperCamelCase_ )
cpu_target.generate_target()
lowerCAmelCase : Dict = 0.46 / 4
lowerCAmelCase : int = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=UpperCamelCase_ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=UpperCamelCase_ , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=UpperCamelCase_ , buff=0.0 )
cpu_targs.append(UpperCamelCase_ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(UpperCamelCase_ ) )
second_animations.append(MoveToTarget(UpperCamelCase_ , run_time=1.5 ) )
self.play(*UpperCamelCase_ )
self.play(*UpperCamelCase_ )
self.wait()
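# Usage note (added sketch, not part of the original scene): with the Manim
# community edition installed, the scene above can be rendered with, e.g.:
#   manim -pql stage_1.py Stage1
# The file name "stage_1.py" is an assumption; use whatever this module is saved as.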
| 60
|
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


def simple_accuracy(preds, labels):
    return (preds == labels).mean()


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

                results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
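# Example invocation (added sketch; model name, task, and paths are placeholders,
# though the flags themselves come from the dataclasses and TrainingArguments above):
#   python run_multiple_choice.py --model_name_or_path bert-base-uncased \
#       --task_name swag --data_dir ./data --output_dir ./out --do_train --do_eval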
| 60
| 1
|
from ..utils import DummyObject, requires_backends
# Auto-generated dummy object: it stands in for a sentencepiece-backed tokenizer
# class and raises an informative error when the backend is missing. The original
# module repeats this identical pattern once per tokenizer; the concrete class
# names are not recoverable from this dump, so one generic placeholder remains.
class UpperCAmelCase(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
| 10
|
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings
        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )

        return (config, input_ids, attention_mask, lm_labels)

    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        attention_mask,
        lm_labels,
    ):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented currently
    def test_inputs_embeds(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_from_base(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
| 10
| 1
|
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
| 122
|
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by the chosen validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 234
| 0
|
from __future__ import annotations
from random import choice
def random_pivot(lst):
    """Choose a random pivot element from the list."""
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """Return the k-th smallest element of lst (1-indexed), via quickselect."""
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
if __name__ == "__main__":
import doctest
doctest.testmod()
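    # Illustrative run (added sketch, not in the original module): the 3rd
    # smallest element of [2, 1, 3, 4, 5] is 3.
    example = [2, 1, 3, 4, 5]
    print(f"kth_number({example}, 3) = {kth_number(example, 3)}")  # expected: 3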
| 189
|
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    """A simple vector class on top of a list of components."""

    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        return Vector(self.__components)

    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)


def zero_vector(dimension: int) -> Vector:
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    # computes scalar * x + y
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)


class Matrix:
    """A simple matrix class with basic operations."""

    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("change_component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)


def square_zero_matrix(n: int) -> Matrix:
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
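if __name__ == "__main__":
    # Illustrative usage (added sketch, not in the original module):
    v = Vector([1, 2, 3])
    w = Vector([4, 5, 6])
    print(v * w)  # dot product: 32
    print(v.euclidean_length())  # sqrt(14) ~= 3.7417
    print(v + w)  # (5,7,9)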
| 189
| 1
|
"""simple docstring"""
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_CITATION = '''\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",
author = "Lin, Chin-Yew and
Och, Franz Josef",
booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",
month = "aug 23{--}aug 27",
year = "2004",
address = "Geneva, Switzerland",
publisher = "COLING",
url = "https://www.aclweb.org/anthology/C04-1072",
pages = "501--507",
}
'''
_DESCRIPTION = '''\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,
the better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
'''
_KWARGS_DESCRIPTION = '''
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
\'bleu\': bleu score,
\'precisions\': geometric mean of n-gram precisions,
\'brevity_penalty\': brevity penalty,
\'length_ratio\': ratio of lengths,
\'translation_length\': translation_length,
\'reference_length\': reference_length
Examples:
>>> predictions = [
... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample
... ["foo", "bar", "foobar"] # tokenized prediction of the second sample
... ]
>>> references = [
... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)
... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric("bleu")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results["bleu"])
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Bleu(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
            codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
| 135
|
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id


logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")

            results.update(result)

    return results
if __name__ == "__main__":
main()
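# Example invocation (added sketch; the script name, file paths, and model are
# placeholders, though the flags come from the dataclasses defined above):
#   python run_tf_text_classification.py --train_file train.csv --dev_file dev.csv \
#       --label_column_id 0 --model_name_or_path bert-base-uncased --output_dir ./out --do_train --do_eval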
| 135
| 1
|
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    """Return the inverse of a 2x2 or 3x3 matrix."""
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
| 310
|
"""simple docstring"""
from typing import Any
def mode(input_list: list[Any]) -> list[Any]:
    """Return the mode(s) of input_list as a sorted list."""
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})
if __name__ == "__main__":
import doctest
doctest.testmod()
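    # Illustrative run (added sketch): 2 and 3 both occur twice, so both are modes.
    print(mode([1, 2, 2, 3, 3]))  # expected: [2, 3]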
| 310
| 1
|
def solution(n: int = 1000) -> int:
    """
    Project Euler 57: count, among the first n expansions of the continued
    fraction for sqrt(2), those whose numerator has more digits than the
    denominator.
    """
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator

    return len(result)
if __name__ == "__main__":
print(f"""{solution() = }""")
| 310
|
'''simple docstring'''
import functools
def min_distance_up_bottom(word1: str, word2: str) -> int:
    """
    Compute the Levenshtein (edit) distance between two words, top-down with
    memoization.
    """
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index is overflow - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index is overflow - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
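    # Illustrative run (added sketch): the classic Levenshtein example.
    print(min_distance_up_bottom("intention", "execution"))  # expected: 5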
| 304
| 0
|
import argparse
JS_FILE = "docs/source/_static/js/custom.js"


def update_custom_js(version):
    """Update the version table in the custom.js file."""
    with open(JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
update_custom_js(args.version)
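# Illustrative effect on custom.js (version numbers hypothetical):
#     const stableVersion = "v4.30.0"   becomes   const stableVersion = "v4.31.0"
# and the versionMapping dictionary gains a trailing entry
#     "v4.31.0": "v4.31.0",
# just before its closing brace.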
| 323
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A : Union[str, Any] = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Any = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
__A : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
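# Note on the lazy-import pattern above: sys.modules[__name__] is replaced by
# a _LazyModule proxy, so names listed in _import_structure are only really
# imported on first attribute access; importing the package stays cheap, and
# torch is touched only when a model class is actually used.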
| 323
| 1
|
from ..utils import DummyObject, requires_backends
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : List[str] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Any) ->Optional[int]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : List[str] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Optional[int]) ->Tuple:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : List[str] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : str) ->List[str]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : List[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : int) ->str:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : Optional[int] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Optional[int]) ->Optional[int]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : Optional[Any] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Any) ->List[Any]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : Optional[int] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[str]) ->List[str]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : Dict , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[Any]) ->List[Any]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : Dict , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Any) ->Dict:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : Tuple , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Tuple) ->List[Any]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : Optional[Any] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Dict) ->Optional[Any]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : Union[str, Any] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : int) ->Dict:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : Union[str, Any] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Tuple) ->Optional[int]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : List[str] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Any) ->Optional[int]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : List[str] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[int]) ->Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : Optional[int] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Union[str, Any]) ->List[str]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : Optional[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : str) ->str:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : Tuple , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Optional[int]) ->Optional[int]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : str , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : List[Any]) ->Dict:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : List[str] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Dict) ->Tuple:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : Any , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Dict) ->Any:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : Optional[int] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Dict) ->int:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : Dict , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : int) ->Any:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : int , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : List[str]) ->Optional[int]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : Optional[int] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Optional[int]) ->Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : Optional[Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Union[str, Any]) ->Tuple:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : Union[str, Any] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Optional[Any]) ->int:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : Optional[Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : List[str]) ->str:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : List[str] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Union[str, Any]) ->Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : Any , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Any) ->Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : List[str] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : int) ->List[str]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
| 10
|
from typing import Any
def lowerCAmelCase_ ( __a , __a , __a , __a , __a , ) -> list:
"""simple docstring"""
_validation(
__a , __a , __a , __a , __a , )
# Creates data structures and fill initial step
lowerCamelCase__: dict ={}
lowerCamelCase__: dict ={}
for state in states_space:
lowerCamelCase__: Optional[Any] =observations_space[0]
lowerCamelCase__: List[Any] =(
initial_probabilities[state] * emission_probabilities[state][observation]
)
lowerCamelCase__: int =None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(__a ) ):
lowerCamelCase__: Tuple =observations_space[o]
lowerCamelCase__: Optional[Any] =observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
lowerCamelCase__: Tuple =""
lowerCamelCase__: Optional[Any] =-1
for k_state in states_space:
lowerCamelCase__: int =(
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
lowerCamelCase__: List[str] =probability
lowerCamelCase__: int =k_state
# Update probabilities and pointers dicts
lowerCamelCase__: Any =(
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
lowerCamelCase__: int =arg_max
# The final observation
lowerCamelCase__: Any =observations_space[len(__a ) - 1]
# argmax for given final observation
lowerCamelCase__: Optional[Any] =""
lowerCamelCase__: int =-1
for k_state in states_space:
lowerCamelCase__: Tuple =probabilities[(k_state, final_observation)]
if probability > max_probability:
lowerCamelCase__: List[Any] =probability
lowerCamelCase__: Dict =k_state
lowerCamelCase__: str =arg_max
# Process pointers backwards
lowerCamelCase__: Union[str, Any] =last_state
lowerCamelCase__: List[str] =[]
for o in range(len(__a ) - 1 , -1 , -1 ):
result.append(__a )
lowerCamelCase__: Union[str, Any] =pointers[previous, observations_space[o]]
result.reverse()
return result
def lowerCAmelCase_ ( __a , __a , __a , __a , __a , ) -> None:
"""simple docstring"""
_validate_not_empty(
__a , __a , __a , __a , __a , )
_validate_lists(__a , __a )
_validate_dicts(
__a , __a , __a )
def lowerCAmelCase_ ( __a , __a , __a , __a , __a , ) -> None:
"""simple docstring"""
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError("There's an empty parameter" )
def lowerCAmelCase_ ( __a , __a ) -> None:
"""simple docstring"""
_validate_list(__a , "observations_space" )
_validate_list(__a , "states_space" )
def lowerCAmelCase_ ( __a , __a ) -> None:
"""simple docstring"""
if not isinstance(_object , __a ):
lowerCamelCase__: Tuple =F"""{var_name} must be a list"""
raise ValueError(__a )
else:
for x in _object:
if not isinstance(__a , __a ):
lowerCamelCase__: str =F"""{var_name} must be a list of strings"""
raise ValueError(__a )
def lowerCAmelCase_ ( __a , __a , __a , ) -> None:
"""simple docstring"""
_validate_dict(__a , "initial_probabilities" , __a )
_validate_nested_dict(__a , "transition_probabilities" )
_validate_nested_dict(__a , "emission_probabilities" )
def lowerCAmelCase_ ( __a , __a ) -> None:
"""simple docstring"""
_validate_dict(_object , __a , __a )
for x in _object.values():
_validate_dict(__a , __a , __a , __a )
def lowerCAmelCase_ ( __a , __a , __a , __a = False ) -> None:
"""simple docstring"""
if not isinstance(_object , __a ):
lowerCamelCase__: Optional[int] =F"""{var_name} must be a dict"""
raise ValueError(__a )
if not all(isinstance(__a , __a ) for x in _object ):
lowerCamelCase__: Tuple =F"""{var_name} all keys must be strings"""
raise ValueError(__a )
if not all(isinstance(__a , __a ) for x in _object.values() ):
lowerCamelCase__: Dict ="nested dictionary " if nested else ""
lowerCamelCase__: List[str] =F"""{var_name} {nested_text}all values must be {value_type.__name__}"""
raise ValueError(__a )
if __name__ == "__main__":
from doctest import testmod
testmod()
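# Usage sketch for the Viterbi routine above, on the classic two-state toy HMM
# (all names and numbers below are illustrative):
#     observations = ["normal", "cold", "dizzy"]
#     states = ["healthy", "sick"]
#     start_p = {"healthy": 0.6, "sick": 0.4}
#     trans_p = {"healthy": {"healthy": 0.7, "sick": 0.3},
#                "sick": {"healthy": 0.4, "sick": 0.6}}
#     emit_p = {"healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
#               "sick": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6}}
# Expected most likely state path: ["healthy", "healthy", "sick"].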
| 10
| 1
|
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ : Any = logging.get_logger(__name__)
model_classes = {
    'b0': efficientnet.EfficientNetB0,
    'b1': efficientnet.EfficientNetB1,
    'b2': efficientnet.EfficientNetB2,
    'b3': efficientnet.EfficientNetB3,
    'b4': efficientnet.EfficientNetB4,
    'b5': efficientnet.EfficientNetB5,
    'b6': efficientnet.EfficientNetB6,
    'b7': efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
'b0': {
'hidden_dim': 1_280,
'width_coef': 1.0,
'depth_coef': 1.0,
'image_size': 224,
'dropout_rate': 0.2,
'dw_padding': [],
},
'b1': {
'hidden_dim': 1_280,
'width_coef': 1.0,
'depth_coef': 1.1,
'image_size': 240,
'dropout_rate': 0.2,
'dw_padding': [16],
},
'b2': {
'hidden_dim': 1_408,
'width_coef': 1.1,
'depth_coef': 1.2,
'image_size': 260,
'dropout_rate': 0.3,
'dw_padding': [5, 8, 16],
},
'b3': {
'hidden_dim': 1_536,
'width_coef': 1.2,
'depth_coef': 1.4,
'image_size': 300,
'dropout_rate': 0.3,
'dw_padding': [5, 18],
},
'b4': {
'hidden_dim': 1_792,
'width_coef': 1.4,
'depth_coef': 1.8,
'image_size': 380,
'dropout_rate': 0.4,
'dw_padding': [6],
},
'b5': {
'hidden_dim': 2_048,
'width_coef': 1.6,
'depth_coef': 2.2,
'image_size': 456,
'dropout_rate': 0.4,
'dw_padding': [13, 27],
},
'b6': {
'hidden_dim': 2_304,
'width_coef': 1.8,
'depth_coef': 2.6,
'image_size': 528,
'dropout_rate': 0.5,
'dw_padding': [31],
},
'b7': {
'hidden_dim': 2_560,
'width_coef': 2.0,
'depth_coef': 3.1,
'image_size': 600,
'dropout_rate': 0.5,
'dw_padding': [18],
},
}
def UpperCAmelCase ( a_ ) -> Tuple:
"""simple docstring"""
A_ : int = EfficientNetConfig()
A_ : List[str] = CONFIG_MAP[model_name]["""hidden_dim"""]
A_ : Optional[Any] = CONFIG_MAP[model_name]["""width_coef"""]
A_ : List[str] = CONFIG_MAP[model_name]["""depth_coef"""]
A_ : Dict = CONFIG_MAP[model_name]["""image_size"""]
A_ : Tuple = CONFIG_MAP[model_name]["""dropout_rate"""]
A_ : Optional[int] = CONFIG_MAP[model_name]["""dw_padding"""]
A_ : Tuple = """huggingface/label-files"""
A_ : List[Any] = """imagenet-1k-id2label.json"""
A_ : Any = 1_0_0_0
A_ : int = json.load(open(hf_hub_download(a_ , a_ , repo_type="""dataset""" ) , """r""" ) )
A_ : Optional[int] = {int(a_ ): v for k, v in idalabel.items()}
A_ : Any = idalabel
A_ : List[Any] = {v: k for k, v in idalabel.items()}
return config
def UpperCAmelCase ( ) -> Optional[Any]:
"""simple docstring"""
A_ : Tuple = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A_ : str = Image.open(requests.get(a_ , stream=a_ ).raw )
return im
def UpperCAmelCase ( a_ ) -> Optional[int]:
"""simple docstring"""
A_ : Optional[int] = CONFIG_MAP[model_name]["""image_size"""]
A_ : Dict = EfficientNetImageProcessor(
size={"""height""": size, """width""": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47853944, 0.4732864, 0.47434163] , do_center_crop=a_ , )
return preprocessor
def UpperCAmelCase ( a_ ) -> List[Any]:
"""simple docstring"""
A_ : Union[str, Any] = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )]
A_ : List[str] = sorted(set(a_ ) )
A_ : List[str] = len(a_ )
A_ : Optional[int] = {b: str(a_ ) for b, i in zip(a_ , range(a_ ) )}
A_ : Dict = []
rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") )
rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") )
rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") )
rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") )
rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") )
for b in block_names:
A_ : Dict = block_name_mapping[b]
rename_keys.append((F"block{b}_expand_conv/kernel:0", F"encoder.blocks.{hf_b}.expansion.expand_conv.weight") )
rename_keys.append((F"block{b}_expand_bn/gamma:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.weight") )
rename_keys.append((F"block{b}_expand_bn/beta:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.bias") )
rename_keys.append(
(F"block{b}_expand_bn/moving_mean:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.running_mean") )
rename_keys.append(
(F"block{b}_expand_bn/moving_variance:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.running_var") )
rename_keys.append(
(F"block{b}_dwconv/depthwise_kernel:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight") )
rename_keys.append((F"block{b}_bn/gamma:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight") )
rename_keys.append((F"block{b}_bn/beta:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias") )
rename_keys.append(
(F"block{b}_bn/moving_mean:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean") )
rename_keys.append(
(F"block{b}_bn/moving_variance:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var") )
rename_keys.append((F"block{b}_se_reduce/kernel:0", F"encoder.blocks.{hf_b}.squeeze_excite.reduce.weight") )
rename_keys.append((F"block{b}_se_reduce/bias:0", F"encoder.blocks.{hf_b}.squeeze_excite.reduce.bias") )
rename_keys.append((F"block{b}_se_expand/kernel:0", F"encoder.blocks.{hf_b}.squeeze_excite.expand.weight") )
rename_keys.append((F"block{b}_se_expand/bias:0", F"encoder.blocks.{hf_b}.squeeze_excite.expand.bias") )
rename_keys.append(
(F"block{b}_project_conv/kernel:0", F"encoder.blocks.{hf_b}.projection.project_conv.weight") )
rename_keys.append((F"block{b}_project_bn/gamma:0", F"encoder.blocks.{hf_b}.projection.project_bn.weight") )
rename_keys.append((F"block{b}_project_bn/beta:0", F"encoder.blocks.{hf_b}.projection.project_bn.bias") )
rename_keys.append(
(F"block{b}_project_bn/moving_mean:0", F"encoder.blocks.{hf_b}.projection.project_bn.running_mean") )
rename_keys.append(
(F"block{b}_project_bn/moving_variance:0", F"encoder.blocks.{hf_b}.projection.project_bn.running_var") )
rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") )
rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") )
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") )
rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") )
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") )
A_ : int = {}
for item in rename_keys:
if item[0] in original_param_names:
A_ : Dict = """efficientnet.""" + item[1]
A_ : Optional[Any] = """classifier.weight"""
A_ : Union[str, Any] = """classifier.bias"""
return key_mapping
def UpperCAmelCase ( a_ , a_ , a_ ) -> int:
"""simple docstring"""
for key, value in tf_params.items():
if "normalization" in key:
continue
A_ : Tuple = key_mapping[key]
if "_conv" in key and "kernel" in key:
A_ : Tuple = torch.from_numpy(a_ ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
A_ : Optional[int] = torch.from_numpy(a_ ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
A_ : Any = torch.from_numpy(np.transpose(a_ ) )
else:
A_ : int = torch.from_numpy(a_ )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(a_ )
@torch.no_grad()
def UpperCAmelCase ( a_ , a_ , a_ , a_ ) -> Optional[int]:
"""simple docstring"""
A_ : List[Any] = model_classes[model_name](
include_top=a_ , weights="""imagenet""" , input_tensor=a_ , input_shape=a_ , pooling=a_ , classes=1_0_0_0 , classifier_activation="""softmax""" , )
A_ : Union[str, Any] = original_model.trainable_variables
A_ : Optional[Any] = original_model.non_trainable_variables
A_ : Tuple = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
A_ : Tuple = param.numpy()
A_ : str = list(tf_params.keys() )
# Load HuggingFace model
A_ : List[Any] = get_efficientnet_config(a_ )
A_ : Dict = EfficientNetForImageClassification(a_ ).eval()
A_ : List[str] = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print("""Converting parameters...""" )
A_ : int = rename_keys(a_ )
replace_params(a_ , a_ , a_ )
# Initialize preprocessor and preprocess input image
A_ : List[str] = convert_image_processor(a_ )
A_ : Tuple = preprocessor(images=prepare_img() , return_tensors="""pt""" )
# HF model inference
hf_model.eval()
with torch.no_grad():
A_ : str = hf_model(**a_ )
A_ : Optional[Any] = outputs.logits.detach().numpy()
# Original model inference
A_ : List[Any] = False
A_ : Optional[Any] = CONFIG_MAP[model_name]["""image_size"""]
A_ : Any = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
A_ : str = image.img_to_array(a_ )
A_ : Any = np.expand_dims(a_ , axis=0 )
A_ : Union[str, Any] = original_model.predict(a_ )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(a_ , a_ , atol=1E-3 ), "The predicted logits are not the same."
print("""Model outputs match!""" )
if save_model:
# Create folder to save model
if not os.path.isdir(a_ ):
os.mkdir(a_ )
# Save converted model and image processor
hf_model.save_pretrained(a_ )
preprocessor.save_pretrained(a_ )
if push_to_hub:
# Push model and image processor to hub
print(F"Pushing converted {model_name} to the hub..." )
A_ : Tuple = F"efficientnet-{model_name}"
preprocessor.push_to_hub(a_ )
hf_model.push_to_hub(a_ )
if __name__ == "__main__":
UpperCamelCase__ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='b0',
type=str,
help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='hf_model',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--save_model', action='store_true', help='Save model to local')
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
UpperCamelCase__ : str = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
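# Note on the layout conversion performed above: TensorFlow stores conv
# kernels as (H, W, in, out) while PyTorch expects (out, in, H, W), and
# depthwise kernels as (H, W, C, 1) versus PyTorch's (C, 1, H, W); hence
#     torch.from_numpy(kernel).permute(3, 2, 0, 1)   # regular conv
#     torch.from_numpy(kernel).permute(2, 3, 0, 1)   # depthwise conv
# and a plain np.transpose for dense (fully connected) kernels.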
| 164
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase__ : Any = {
'configuration_luke': ['LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LukeConfig'],
'tokenization_luke': ['LukeTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : int = [
'LUKE_PRETRAINED_MODEL_ARCHIVE_LIST',
'LukeForEntityClassification',
'LukeForEntityPairClassification',
'LukeForEntitySpanClassification',
'LukeForMultipleChoice',
'LukeForQuestionAnswering',
'LukeForSequenceClassification',
'LukeForTokenClassification',
'LukeForMaskedLM',
'LukeModel',
'LukePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
UpperCamelCase__ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 164
| 1
|
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'''text-classification''',
'''language-modeling''',
'''summarization''',
'''token-classification''',
'''question-answering''',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file( ) -> str:
    parser = argparse.ArgumentParser()
    parser.add_argument("-f" )
    args = parser.parse_args()
    return args.f
def get_results( output_dir , split="eval" ) -> dict:
    path = os.path.join(output_dir , f'{split}_results.json' )
    if os.path.exists(path ):
        with open(path , "r" ) as f:
            return json.load(f )
    raise ValueError(f'can\'t find {path}' )
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __a ( A__ ):
def __lowercase ( self : List[Any] ):
'''simple docstring'''
UpperCamelCase__ : List[str] = self.get_auto_remove_tmp_dir()
UpperCamelCase__ : List[str] = F'\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n '.split()
with patch.object(SCREAMING_SNAKE_CASE , "argv" , SCREAMING_SNAKE_CASE ):
run_flax_glue.main()
UpperCamelCase__ : Dict = get_results(SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["eval_accuracy"] , 0.7_5 )
@slow
def __lowercase ( self : Dict ):
'''simple docstring'''
UpperCamelCase__ : str = self.get_auto_remove_tmp_dir()
UpperCamelCase__ : Any = F'\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n '.split()
with patch.object(SCREAMING_SNAKE_CASE , "argv" , SCREAMING_SNAKE_CASE ):
run_clm_flax.main()
UpperCamelCase__ : Union[str, Any] = get_results(SCREAMING_SNAKE_CASE )
self.assertLess(result["eval_perplexity"] , 1_00 )
@slow
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCamelCase__ : int = self.get_auto_remove_tmp_dir()
UpperCamelCase__ : List[Any] = F'\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n '.split()
with patch.object(SCREAMING_SNAKE_CASE , "argv" , SCREAMING_SNAKE_CASE ):
run_summarization_flax.main()
UpperCamelCase__ : List[Any] = get_results(SCREAMING_SNAKE_CASE , split="test" )
self.assertGreaterEqual(result["test_rouge1"] , 10 )
self.assertGreaterEqual(result["test_rouge2"] , 2 )
self.assertGreaterEqual(result["test_rougeL"] , 7 )
self.assertGreaterEqual(result["test_rougeLsum"] , 7 )
@slow
def __lowercase ( self : List[Any] ):
'''simple docstring'''
UpperCamelCase__ : Dict = self.get_auto_remove_tmp_dir()
UpperCamelCase__ : Any = F'\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n '.split()
with patch.object(SCREAMING_SNAKE_CASE , "argv" , SCREAMING_SNAKE_CASE ):
run_mlm_flax.main()
UpperCamelCase__ : List[str] = get_results(SCREAMING_SNAKE_CASE )
self.assertLess(result["eval_perplexity"] , 42 )
@slow
def __lowercase ( self : int ):
'''simple docstring'''
UpperCamelCase__ : List[str] = self.get_auto_remove_tmp_dir()
UpperCamelCase__ : List[Any] = F'\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n '.split()
with patch.object(SCREAMING_SNAKE_CASE , "argv" , SCREAMING_SNAKE_CASE ):
run_ta_mlm_flax.main()
UpperCamelCase__ : Optional[Any] = get_results(SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["eval_accuracy"] , 0.4_2 )
@slow
def __lowercase ( self : int ):
'''simple docstring'''
UpperCamelCase__ : Optional[int] = 7 if get_gpu_count() > 1 else 2
UpperCamelCase__ : int = self.get_auto_remove_tmp_dir()
UpperCamelCase__ : List[str] = F'\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n '.split()
with patch.object(SCREAMING_SNAKE_CASE , "argv" , SCREAMING_SNAKE_CASE ):
run_flax_ner.main()
UpperCamelCase__ : Optional[int] = get_results(SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["eval_accuracy"] , 0.7_5 )
self.assertGreaterEqual(result["eval_f1"] , 0.3 )
@slow
def __lowercase ( self : List[Any] ):
'''simple docstring'''
UpperCamelCase__ : List[Any] = self.get_auto_remove_tmp_dir()
UpperCamelCase__ : Dict = F'\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n '.split()
with patch.object(SCREAMING_SNAKE_CASE , "argv" , SCREAMING_SNAKE_CASE ):
run_qa.main()
UpperCamelCase__ : Optional[int] = get_results(SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["eval_f1"] , 30 )
self.assertGreaterEqual(result["eval_exact"] , 30 )
| 189
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCamelCase : Union[str, Any] =logging.get_logger(__name__)
lowerCamelCase : str ={
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
lowerCamelCase : str ={
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
lowerCamelCase : str ={'''facebook/blenderbot-3B''': 128}
class __a ( A__ ):
_lowerCAmelCase : Union[str, Any] = VOCAB_FILES_NAMES
_lowerCAmelCase : Dict = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase : int = ['''input_ids''', '''attention_mask''']
_lowerCAmelCase : Tuple = BlenderbotTokenizer
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE : Optional[int]=None , SCREAMING_SNAKE_CASE : Dict=None , SCREAMING_SNAKE_CASE : Union[str, Any]=None , SCREAMING_SNAKE_CASE : Tuple="replace" , SCREAMING_SNAKE_CASE : Union[str, Any]="<s>" , SCREAMING_SNAKE_CASE : str="</s>" , SCREAMING_SNAKE_CASE : str="</s>" , SCREAMING_SNAKE_CASE : Tuple="<s>" , SCREAMING_SNAKE_CASE : Union[str, Any]="<unk>" , SCREAMING_SNAKE_CASE : str="<pad>" , SCREAMING_SNAKE_CASE : Union[str, Any]="<mask>" , SCREAMING_SNAKE_CASE : Any=False , SCREAMING_SNAKE_CASE : List[Any]=True , **SCREAMING_SNAKE_CASE : int , ):
'''simple docstring'''
super().__init__(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , tokenizer_file=SCREAMING_SNAKE_CASE , errors=SCREAMING_SNAKE_CASE , bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE , trim_offsets=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
UpperCamelCase__ : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , SCREAMING_SNAKE_CASE ) != add_prefix_space:
UpperCamelCase__ : str = getattr(SCREAMING_SNAKE_CASE , pre_tok_state.pop("type" ) )
UpperCamelCase__ : Union[str, Any] = add_prefix_space
UpperCamelCase__ : Union[str, Any] = pre_tok_class(**SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = add_prefix_space
UpperCamelCase__ : Optional[int] = "post_processor"
UpperCamelCase__ : Union[str, Any] = getattr(self.backend_tokenizer , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if tokenizer_component_instance:
UpperCamelCase__ : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
UpperCamelCase__ : Tuple = tuple(state["sep"] )
if "cls" in state:
UpperCamelCase__ : Optional[int] = tuple(state["cls"] )
UpperCamelCase__ : List[Any] = False
if state.get("add_prefix_space" , SCREAMING_SNAKE_CASE ) != add_prefix_space:
UpperCamelCase__ : str = add_prefix_space
UpperCamelCase__ : Optional[Any] = True
if state.get("trim_offsets" , SCREAMING_SNAKE_CASE ) != trim_offsets:
UpperCamelCase__ : Optional[Any] = trim_offsets
UpperCamelCase__ : Tuple = True
if changes_to_apply:
UpperCamelCase__ : Optional[int] = getattr(SCREAMING_SNAKE_CASE , state.pop("type" ) )
UpperCamelCase__ : Dict = component_class(**SCREAMING_SNAKE_CASE )
setattr(self.backend_tokenizer , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def __lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
UpperCamelCase__ : Dict = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else value
UpperCamelCase__ : str = value
def __lowercase ( self : Any , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
UpperCamelCase__ : List[Any] = kwargs.get("is_split_into_words" , SCREAMING_SNAKE_CASE )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __lowercase ( self : Dict , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
UpperCamelCase__ : Dict = kwargs.get("is_split_into_words" , SCREAMING_SNAKE_CASE )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[str] = None ):
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = self._tokenizer.model.save(SCREAMING_SNAKE_CASE , name=SCREAMING_SNAKE_CASE )
return tuple(SCREAMING_SNAKE_CASE )
def __lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE : List[int] , SCREAMING_SNAKE_CASE : Optional[List[int]] = None ):
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = [self.sep_token_id]
UpperCamelCase__ : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE : List[int] , SCREAMING_SNAKE_CASE : Optional[List[int]] = None ):
'''simple docstring'''
return token_ids_a + [self.eos_token_id]
def __lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : "Conversation" ):
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to prefix the text with a space, as is done inside Blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[str] = " ".join(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[Any] = self.encode(SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) > self.model_max_length:
UpperCamelCase__ : Dict = input_ids[-self.model_max_length :]
logger.warning(F'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' )
return input_ids
| 189
| 1
|
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def __a ( _SCREAMING_SNAKE_CASE ) ->List[Any]:
a__: Optional[int] = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'encoder.embed_positions._float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
state_dict.pop(lowerCamelCase__ , lowerCamelCase__ )
def __a ( _SCREAMING_SNAKE_CASE ) ->Any:
a__: Any = list(s_dict.keys() )
for key in keys:
if "transformer_layers" in key:
a__: List[Any] = s_dict.pop(lowerCamelCase__ )
elif "subsample" in key:
a__: Tuple = s_dict.pop(lowerCamelCase__ )
def __a ( _SCREAMING_SNAKE_CASE ) ->Tuple:
a__ , a__: Optional[int] = emb.weight.shape
a__: List[Any] = nn.Linear(lowerCamelCase__ , lowerCamelCase__ , bias=lowerCamelCase__ )
a__: Dict = emb.weight.data
return lin_layer
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[Any]:
a__: List[str] = torch.load(lowerCamelCase__ , map_location='cpu' )
a__: List[Any] = mam_aaa['args']
a__: str = mam_aaa['model']
a__: List[Any] = state_dict['decoder.output_projection.weight']
remove_ignore_keys_(lowerCamelCase__ )
rename_keys(lowerCamelCase__ )
a__: List[Any] = state_dict['decoder.embed_tokens.weight'].shape[0]
a__: Union[str, Any] = args.share_decoder_input_output_embed
a__: Optional[Any] = [int(lowerCamelCase__ ) for i in args.conv_kernel_sizes.split(',' )]
a__: str = SpeechaTextConfig(
vocab_size=lowerCamelCase__ , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='relu' , num_conv_layers=len(lowerCamelCase__ ) , conv_channels=args.conv_channels , conv_kernel_sizes=lowerCamelCase__ , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=lowerCamelCase__ , num_beams=5 , max_length=200 , use_cache=lowerCamelCase__ , decoder_start_token_id=2 , early_stopping=lowerCamelCase__ , )
a__: Optional[int] = SpeechaTextForConditionalGeneration(lowerCamelCase__ )
a__ , a__: Optional[int] = model.model.load_state_dict(lowerCamelCase__ , strict=lowerCamelCase__ )
if len(lowerCamelCase__ ) > 0 and not set(lowerCamelCase__ ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'
F' but all the following weights are missing {missing}' )
if tie_embeds:
a__: Any = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
a__: List[str] = lm_head_weights
model.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
lowercase__ = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
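# Sketch of the embedding-tying step used above: the LM head is a bias-free
# Linear layer whose weight matrix is shared with the decoder's token
# embedding table, so vocabulary logits reuse the embedding parameters:
#     vocab_size, emb_size = emb.weight.shape
#     lm_head = nn.Linear(emb_size, vocab_size, bias=False)
#     lm_head.weight.data = emb.weight.data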
| 350
|
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
Prediction = Dict[str, Any]
Predictions = List[Prediction]
@add_end_docstrings(__lowerCAmelCase )
class __snake_case ( __lowerCAmelCase ):
def __init__( self , *lowercase , **lowercase) -> Dict:
'''simple docstring'''
super().__init__(*lowercase , **lowercase)
if self.framework == "tf":
raise ValueError(f'The {self.__class__} is only available in PyTorch.')
requires_backends(self , 'vision')
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items()))
def lowerCamelCase_ ( self , **lowercase) -> int:
'''simple docstring'''
a__: Optional[Any] = {}
if "threshold" in kwargs:
a__: Dict = kwargs['threshold']
return {}, {}, postprocess_kwargs
def __call__( self , *lowercase , **lowercase) -> Union[Predictions, List[Prediction]]:
'''simple docstring'''
return super().__call__(*lowercase , **lowercase)
def lowerCamelCase_ ( self , lowercase) -> List[Any]:
'''simple docstring'''
a__: Optional[Any] = load_image(lowercase)
a__: List[Any] = torch.IntTensor([[image.height, image.width]])
a__: Any = self.image_processor(images=[image] , return_tensors='pt')
if self.tokenizer is not None:
a__: Any = self.tokenizer(text=inputs['words'] , boxes=inputs['boxes'] , return_tensors='pt')
a__: List[str] = target_size
return inputs
def lowerCamelCase_ ( self , lowercase) -> int:
'''simple docstring'''
a__: Any = model_inputs.pop('target_size')
a__: Union[str, Any] = self.model(**lowercase)
a__: List[str] = outputs.__class__({'target_size': target_size, **outputs})
if self.tokenizer is not None:
a__: Union[str, Any] = model_inputs['bbox']
return model_outputs
def lowerCamelCase_ ( self , lowercase , lowercase=0.9) -> Optional[Any]:
'''simple docstring'''
a__: int = model_outputs['target_size']
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
a__ , a__: str = target_size[0].tolist()
def unnormalize(lowercase):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 10_00),
(height * bbox[1] / 10_00),
(width * bbox[2] / 10_00),
(height * bbox[3] / 10_00),
]))
a__ , a__: Optional[Any] = model_outputs['logits'].squeeze(0).softmax(dim=-1).max(dim=-1)
a__: str = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
a__: Union[str, Any] = [unnormalize(lowercase) for bbox in model_outputs['bbox'].squeeze(0)]
a__: Dict = ['score', 'label', 'box']
a__: Any = [dict(zip(lowercase , lowercase)) for vals in zip(scores.tolist() , lowercase , lowercase) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
a__: List[str] = self.image_processor.post_process_object_detection(lowercase , lowercase , lowercase)
a__: Tuple = raw_annotations[0]
a__: List[str] = raw_annotation['scores']
a__: int = raw_annotation['labels']
a__: int = raw_annotation['boxes']
a__: List[Any] = scores.tolist()
a__: Any = [self.model.config.idalabel[label.item()] for label in labels]
a__: Dict = [self._get_bounding_box(lowercase) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
a__: Optional[Any] = ['score', 'label', 'box']
a__: List[Any] = [
dict(zip(lowercase , lowercase))
for vals in zip(raw_annotation['scores'] , raw_annotation['labels'] , raw_annotation['boxes'])
]
return annotation
def lowerCamelCase_ ( self , lowercase) -> Dict[str, int]:
'''simple docstring'''
if self.framework != "pt":
raise ValueError('The ObjectDetectionPipeline is only available in PyTorch.')
a__ , a__ , a__ , a__: List[Any] = box.int().tolist()
a__: Optional[int] = {
'xmin': xmin,
'ymin': ymin,
'xmax': xmax,
'ymax': ymax,
}
return bbox
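# Worked example of the unnormalize step above: LayoutLM-style boxes live on a
# 0-1000 grid, so on a page 500 px wide and 400 px tall,
#     bbox = [100, 250, 300, 500]
#     ->  [500*100/1000, 400*250/1000, 500*300/1000, 400*500/1000]
#     ->  [50.0, 100.0, 150.0, 200.0]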
| 203
| 0
|
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
__snake_case = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
__snake_case = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
__snake_case = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for \'record\': list of question-answers dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
\'matthews_correlation\': Matthew Correlation
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def _A ( _lowercase , _lowercase ) -> Any:
"""simple docstring"""
return float((preds == labels).mean() )
def _A ( _lowercase , _lowercase , _lowercase="binary" ) -> Any:
"""simple docstring"""
__UpperCamelCase = simple_accuracy(_lowercase , _lowercase )
__UpperCamelCase = float(fa_score(y_true=_lowercase , y_pred=_lowercase , average=_lowercase ) )
return {
"accuracy": acc,
"f1": fa,
}
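# Worked example for the accuracy+F1 helper above (binary case):
#     preds  = [1, 0, 1, 1]
#     labels = [1, 0, 0, 1]
# accuracy = 3/4 = 0.75; precision = 2/3, recall = 1, so f1 = 0.8.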
def _A ( _lowercase , _lowercase ) -> Optional[int]:
"""simple docstring"""
__UpperCamelCase = {}
for id_pred, label in zip(_lowercase , _lowercase ):
__UpperCamelCase = f'''{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}'''
__UpperCamelCase = id_pred['prediction']
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
__UpperCamelCase = [(pred, label)]
__UpperCamelCase, __UpperCamelCase = [], []
for question, preds_labels in question_map.items():
__UpperCamelCase, __UpperCamelCase = zip(*_lowercase )
__UpperCamelCase = fa_score(y_true=_lowercase , y_pred=_lowercase , average='macro' )
fas.append(_lowercase )
__UpperCamelCase = int(sum(pred == label for pred, label in preds_labels ) == len(_lowercase ) )
ems.append(_lowercase )
__UpperCamelCase = float(sum(_lowercase ) / len(_lowercase ) )
__UpperCamelCase = sum(_lowercase ) / len(_lowercase )
__UpperCamelCase = float(fa_score(y_true=_lowercase , y_pred=[id_pred['prediction'] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase (datasets.Metric ):
def snake_case_ ( self: List[str] ):
'''simple docstring'''
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
return datasets.MetricInfo(
description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features(self._get_feature_types() ),codebase_urls=[],reference_urls=[],format='numpy' if not self.config_name == 'record' and not self.config_name == 'multirc' else None,)
def snake_case_ ( self: Tuple ):
'''simple docstring'''
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"prediction_text": datasets.Value('string' ),
},
"references": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"answers": datasets.Sequence(datasets.Value('string' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('int64' ),
"paragraph": datasets.Value('int64' ),
"question": datasets.Value('int64' ),
},
"prediction": datasets.Value('int64' ),
},
"references": datasets.Value('int64' ),
}
else:
return {
"predictions": datasets.Value('int64' ),
"references": datasets.Value('int64' ),
}
def snake_case_ ( self: Any,A_: int,A_: str ):
'''simple docstring'''
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(A_,A_ )}
elif self.config_name == "cb":
return acc_and_fa(A_,A_,fa_avg='macro' )
elif self.config_name == "record":
__UpperCamelCase = [
{
'qas': [
{'id': ref['idx']['query'], 'answers': [{'text': ans} for ans in ref['answers']]}
for ref in references
]
}
]
__UpperCamelCase = {pred['idx']['query']: pred['prediction_text'] for pred in predictions}
return evaluate_record(A_,A_ )[0]
elif self.config_name == "multirc":
return evaluate_multirc(A_,A_ )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(A_,A_ )}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
| 310
|
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + '''/test_data/fsmt/fsmt_val_data.json'''
with io.open(filename, '''r''', encoding='''utf-8''') as f:
__snake_case = json.load(f)
@require_torch
class __lowerCamelCase (unittest.TestCase ):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
['en-ru', 2_6.0],
['ru-en', 2_2.0],
['en-de', 2_2.0],
['de-en', 2_9.0],
] )
@slow
    def test_bleu_scores(self, pair, min_bleu_score):
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
| 310
| 1
|
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = "\nimport os\n"

IMPORT_IN_FUNCTION = "\ndef foo():\n    import os\n    return False\n"

DEEPLY_NESTED_IMPORT = "\ndef foo():\n    def bar():\n        if True:\n            import os\n        return False\n    return bar()\n"

TOP_LEVEL_TRY_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept ImportError:\n    raise ValueError()\n"

TRY_IMPORT_IN_FUNCTION = "\nimport os\n\ndef foo():\n    try:\n        import bar\n    except ImportError:\n        raise ValueError()\n"

MULTIPLE_EXCEPTS_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept (ImportError, AttributeError):\n    raise ValueError()\n"

EXCEPT_AS_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept ImportError as e:\n    raise ValueError()\n"

GENERIC_EXCEPT_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept:\n    raise ValueError()\n"

MULTILINE_TRY_IMPORT = "\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    raise ValueError()\n"

MULTILINE_BOTH_IMPORT = "\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    x = 1\n    raise ValueError()\n"

CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]


@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
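# For quick local inspection (a sketch, not part of the test suite): get_imports
# skips imports that are guarded by a try/except ImportError block, so only "os"
# is reported for the TOP_LEVEL_TRY_IMPORT snippet above.
#
#     with open("/tmp/sample.py", "w") as f:
#         f.write(TOP_LEVEL_TRY_IMPORT)
#     assert get_imports("/tmp/sample.py") == ["os"]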
| 210
|
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    # Time
    measures = {"time": time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()

    return measures


def end_measure(start_measures):
    # Time
    measures = {"time": time.time() - start_measures["time"]}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20

    return measures


def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
| 210
| 1
|
'''simple docstring'''
import argparse
__UpperCAmelCase = """docs/source/_static/js/custom.js"""
def __A ( lowerCamelCase_ ):
"""simple docstring"""
with open(lowerCamelCase_ , encoding="""utf-8""" , newline="""\n""" ) as f:
SCREAMING_SNAKE_CASE : int = f.readlines()
SCREAMING_SNAKE_CASE : List[Any] = 0
# First let's put the right version
while not lines[index].startswith("""const stableVersion =""" ):
index += 1
SCREAMING_SNAKE_CASE : str = f'''const stableVersion = "v{version}"\n'''
# Then update the dictionary
while not lines[index].startswith("""const versionMapping = {""" ):
index += 1
# We go until the end
while not lines[index].startswith("""}""" ):
index += 1
# We add the new version at the end
lines[index - 1] += f''' "v{version}": "v{version}",\n'''
with open(lowerCamelCase_ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(lowerCamelCase_ )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--version""", help="""Release version.""")
__UpperCAmelCase = parser.parse_args()
update_custom_js(args.version)
| 323
|
'''simple docstring'''
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """
    Text decoder: a GPT-2 language model conditioned on an (optionally projected)
    prefix embedding.
    """

    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50257,
        n_positions: int = 1024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPT2Config(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPT2LMHeadModel(gpt_config)

    def forward(
        self,
        input_ids: torch.Tensor,
        prefix_embeds: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
    ):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)

    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def generate_beam(
        self,
        input_embeds=None,
        device=None,
        input_ids=None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        eos_token_id: Optional[int] = None,
    ):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
| 323
| 1
|
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    """Return the number of possible shards according to the input gen_kwargs."""
    # Having lists of different sizes makes sharding ambiguous, raise an error in this case
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)


def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group


def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    """Split the gen_kwargs into `max_num_jobs` gen_kwargs, sharding the data-source lists."""
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]


def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }


def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    # Lists of the same size must get the same shuffling so that entangled
    # lists (e.g. shards and their metadata) stay aligned.
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
| 28
|
from math import ceil
def solution(n: int = 1001) -> int:
    """Returns the sum of the numbers on the diagonals in an n by n spiral."""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number')
| 28
| 1
|
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(
    train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list
) -> float:
    """Predict the next day's user count with an ordinary least squares fit."""
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] * beta[2])


def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    """Predict the next day's user count with a seasonal ARIMA model."""
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(train_user, exog=train_match, order=order, seasonal_order=seasonal_order)
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]


def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    """Predict the next day's user count with an RBF-kernel support vector regressor."""
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]


def interquartile_range_checker(train_user: list) -> float:
    """Return a lower limit derived from the interquartile range of the data."""
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim


def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    """Vote whether today's figure is plausible given the forecasts."""
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
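# Quick illustrative checks of the voting rule above (values chosen by hand):
# two of three forecasts sit within 0.1 of the actual value, so the first vote
# passes; all three miss by more than 0.1 (or overshoot), so the second fails.
#
#     assert data_safety_checker([0.45, 0.48, 0.7], 0.5) is True
#     assert data_safety_checker([0.2, 0.7, 0.9], 0.5) is False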
if __name__ == "__main__":
    # data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18_231, 0.0, 1], [22_621, 1.0, 2], [15_675, 0.0, 3], [23_583, 1.0, 4]]
    data_input_df = pd.DataFrame(data_input, columns=["total_user", "total_even", "days"])

    # start normalization
    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()

    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]

    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]

    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]

    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(trn_date, trn_user, trn_match, tst_date, tst_match),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]

    # check the safety of today's data (tst_user holds a single value)
    not_str = "" if data_safety_checker(res_vote, tst_user[0]) else "not "
    print(f"Today's data is {not_str}safe.")
| 164
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
__A = False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 164
| 1
|
import torch
from diffusers import DiffusionPipeline
class CustomPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        image = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1

        model_output = self.unet(image, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample

        # scheduler_output - scheduler_output is all zeros, so the result is a
        # deterministic tensor of ones with the scheduler output's shape.
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)

        return result
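# A minimal driving sketch (illustrative; the tiny component configs are
# arbitrary choices, not something the pipeline above requires):
#
#     from diffusers import DDPMScheduler, UNet2DModel
#
#     unet = UNet2DModel(
#         sample_size=8,
#         in_channels=3,
#         out_channels=3,
#         block_out_channels=(8, 16),
#         down_block_types=("DownBlock2D", "AttnDownBlock2D"),
#         up_block_types=("AttnUpBlock2D", "UpBlock2D"),
#     )
#     scheduler = DDPMScheduler(num_train_timesteps=10)
#     pipeline = CustomPipeline(unet=unet, scheduler=scheduler)
#     result = pipeline()  # deterministic tensor of ones, shape (1, 3, 8, 8)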
| 354
|
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)


class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    """
    Handles arguments for zero-shot text classification by turning each possible
    label into an NLI premise/hypothesis pair.
    """

    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels

    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template)
            )

        if isinstance(sequences, str):
            sequences = [sequences]

        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])

        return sequence_pairs, sequences


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs."
            )

    @property
    def entailment_id(self):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1

    def _parse_and_tokenize(
        self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs
    ):
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`"
            )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs,
                add_special_tokens=add_special_tokens,
                return_tensors=return_tensors,
                padding=padding,
                truncation=truncation,
            )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs,
                    add_special_tokens=add_special_tokens,
                    return_tensors=return_tensors,
                    padding=padding,
                    truncation=TruncationStrategy.DO_NOT_TRUNCATE,
                )
            else:
                raise e

        return inputs

    def _sanitize_parameters(self, **kwargs):
        if kwargs.get("multi_class", None) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers."
            )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params

    def __call__(self, sequences, *args, **kwargs):
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(f"Unable to understand extra arguments {args}")

        return super().__call__(sequences, **kwargs)

    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)

        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])

            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }

    def _forward(self, inputs):
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)

        model_outputs = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs

    def postprocess(self, model_outputs, multi_label=False):
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))

        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)

        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
| 106
| 0
|
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    """Pipeline for unconditional image generation with the DDIM scheduler."""

    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
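# Typical usage (a sketch): load the public "google/ddpm-cifar10-32" DDPM
# weights, which are compatible with the DDIM-scheduler swap performed in
# __init__ above; adjust the checkpoint to taste.
#
#     pipeline = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#     image = pipeline(batch_size=1, num_inference_steps=50, eta=0.0).images[0]
#     image.save("ddim_sample.png")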
| 62
|
"""simple docstring"""
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
    " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}


def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    """Search Google Images for `query` and save up to `max_images` results to disk."""
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }

    html = requests.get("https://www.google.com/search", params=params, headers=headers)
    soup = BeautifulSoup(html.text, "html.parser")
    matched_images_data = "".join(
        re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script")))
    )

    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)

    matched_google_image_data = re.findall(
        r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",",
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0

    removed_matched_google_images_thumbnails = re.sub(
        r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]",
        "",
        str(matched_google_image_data),
    )

    matched_google_full_resolution_images = re.findall(
        r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]",
        removed_matched_google_images_thumbnails,
    )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode("unicode-escape")
        original_size_img = bytes(original_size_img_not_fixed, "ascii").decode("unicode-escape")
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg"
        )
    return index


if __name__ == "__main__":
    try:
        image_count = download_images_from_google_query(sys.argv[1])
        print(f"{image_count} images were downloaded to disk.")
    except IndexError:
        print("Please provide a search term.")
        raise
raise
| 203
| 0
|
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
    args = parser.parse_args()
    dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
| 357
|
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 218
| 0
|
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class TextClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
@require_torch
    def test_small_model_pt(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]
        )

        outputs = text_classifier(["This is great !", "This is bad"], top_k=2)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier("This is great !", top_k=1)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        # Legacy behavior
        outputs = text_classifier("This is great !", return_all_scores=False)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]]
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"label": "LABEL_0", "score": 0.504},
                {"label": "LABEL_0", "score": 0.504},
            ],
        )
@require_torch
    def test_accepts_torch_device(self):
        import torch

        text_classifier = pipeline(
            task="text-classification",
            model="hf-internal-testing/tiny-random-distilbert",
            framework="pt",
            device=torch.device("cpu"),
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])
@require_tf
    def test_small_model_tf(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])
@slow
@require_torch
    def test_pt_bert(self):
        text_classifier = pipeline("text-classification")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])
@slow
@require_tf
    def test_tf_bert(self):
        text_classifier = pipeline("text-classification", framework="tf")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])
    def get_test_pipeline(self, model, tokenizer, processor):
        text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
        return text_classifier, ["HuggingFace is in", "This is another test"]
    def run_pipeline_test(self, text_classifier, _):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = "HuggingFace is in"
        outputs = text_classifier(valid_inputs)

        self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}])
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())

        valid_inputs = ["HuggingFace is in ", "Paris is in France"]
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
        self.assertTrue(outputs[1]["label"] in model.config.id2label.values())

        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs, top_k=None)
        N = len(model.config.id2label.values())
        self.assertEqual(
            nested_simplify(outputs),
            [[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N],
        )

        valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            {"label": ANY(str), "score": ANY(float)},
        )
        self.assertTrue(outputs["label"] in model.config.id2label.values())

        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [["HuggingFace is in ", "Paris is in France"]]
        with self.assertRaises(ValueError):
            text_classifier(invalid_input)

        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]])
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
| 210
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}


class DetaConfig(PretrainedConfig):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        backbone_config=None,
        num_queries=900, max_position_embeddings=2048, encoder_layers=6, encoder_ffn_dim=2048,
        encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=1024, decoder_attention_heads=8,
        encoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256,
        dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0,
        return_intermediate=True, auxiliary_loss=False, position_embedding_type="sine",
        num_feature_levels=5, encoder_n_points=4, decoder_n_points=4, two_stage=True,
        two_stage_num_proposals=300, with_box_refine=True, assign_first_stage=True,
        class_cost=1, bbox_cost=5, giou_cost=2,
        mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5,
        giou_loss_coefficient=2, eos_coefficient=0.1, focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 210
| 1
|
'''simple docstring'''
# Number of symbols in the alphabet, used as the hash base
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs in `text`, using Rabin-Karp rolling hashes."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")
if __name__ == "__main__":
test_rabin_karp()
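# Hand-checked sketch of the rolling-hash update used above, with tiny
# illustrative constants (base 10, modulus 97) instead of the module's values:
#
#     digits = [3, 1, 4]
#     h = (digits[0] * 10 + digits[1]) % 97                # hash of [3, 1] -> 31
#     power = 10 ** (2 - 1) % 97                           # weight of the outgoing digit
#     h = ((h - digits[0] * power) * 10 + digits[2]) % 97  # slide to [1, 4] -> 14
#     assert h == 14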
| 345
|
'''simple docstring'''
import logging
from transformers.configuration_utils import PretrainedConfig
UpperCamelCase : Optional[Any] = logging.getLogger(__name__)
class MaskedBertConfig(PretrainedConfig):
    """A configuration replicating BertConfig with additional parameters for pruning/masking weights."""

    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
        initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0,
        pruning_method="topK", mask_init="constant", mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
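# Illustrative instantiation (a sketch; argument values are arbitrary):
#
#     config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)
#     print(config.pruning_method)  # "topK"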
| 345
| 1
|
'''simple docstring'''
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    """`Enum` that specifies which verification checks to run."""

    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Exceptions during checksums verifications of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum doesn't match the expected checksum."""


def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)


class SplitsVerificationException(Exception):
    """Exceptions during splits verifications."""


class UnexpectedSplits(SplitsVerificationException):
    """The expected splits of the downloaded file are missing."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some recorded splits are missing."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The split sizes don't match the expected split sizes."""


def verify_splits(expected_splits: Optional[dict], recorded_splits: dict):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")


def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Compute the file size and the sha256 checksum of a file."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size: Optional[int]) -> bool:
    """Check if `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
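# Minimal sketch of the checksum verification above (keys are URLs, values are
# the dicts produced by get_size_checksum_dict):
#
#     expected = {"https://example.com/data.csv": {"num_bytes": 10, "checksum": "abc"}}
#     recorded = {"https://example.com/data.csv": {"num_bytes": 10, "checksum": "abc"}}
#     verify_checksums(expected, recorded)  # logs: All the checksums matched successfully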
| 28
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 28
| 1
|
'''simple docstring'''
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Returns the prime numbers below max_number (sieve of Eratosthenes)."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False

    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800_800, degree: int = 800_800) -> int:
    """Returns the number of hybrid-integers p^q * q^p less than or equal to base^degree."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count
print(F"""{solution() = }""")
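# Sanity check of the log-domain comparison used above (a sketch): for p=2, q=3
# the direct product test and the log2 test agree.
#
#     from math import log2
#     p, q, limit = 2, 3, 100
#     assert (q * log2(p) + p * log2(q) <= log2(limit)) == (p**q * q**p <= limit)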
| 91
|
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class UpperCamelCase__:
__magic_name__ : int
__magic_name__ : TreeNode | None = None
__magic_name__ : TreeNode | None = None
_lowercase : Tuple = namedtuple("""CoinsDistribResult""", """moves excess""")
def distribute_coins(root: TreeNode | None) -> int:
    """Return the minimum number of moves so that every node holds exactly one coin."""
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError('The number of nodes should be the same as the number of coins')

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)
        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
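# A minimal worked example (LeetCode 979, "Distribute Coins in Binary Tree"):
# the root holds all 3 coins and each leaf holds none, so one coin must travel
# to each leaf -> 2 moves.
if __name__ == "__main__":
    example_root = TreeNode(3, TreeNode(0), TreeNode(0))
    assert distribute_coins(example_root) == 2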
| 91
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RobertaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('roberta-base', from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 23
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    '''configuration_longt5''': ['''LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongT5Config''', '''LongT5OnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longt5"] = [
        '''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''LongT5EncoderModel''',
        '''LongT5ForConditionalGeneration''',
        '''LongT5Model''',
        '''LongT5PreTrainedModel''',
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_longt5"] = [
        '''FlaxLongT5ForConditionalGeneration''',
        '''FlaxLongT5Model''',
        '''FlaxLongT5PreTrainedModel''',
    ]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 106
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
A = logging.get_logger(__name__)
A = {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096''': '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'''
),
}
class LongformerConfig(PretrainedConfig):
    model_type = '''longformer'''

    def __init__(self, attention_window=512, sep_token_id=2, pad_token_id=1, bos_token_id=0, eos_token_id=2, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, onnx_export=False, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        config.onnx_export = True

    @property
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''global_attention_mask''', dynamic_axis),
] )
@property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: '''batch'''}
        return outputs
@property
    def atol_for_validation(self) -> float:
        return 1e-4
@property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset, 14)
    def generate_dummy_inputs(self, preprocessor: "PreTrainedTokenizerBase", batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs['''input_ids'''])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1
        return inputs
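# A hedged usage sketch (the checkpoint name is illustrative, not from this file):
# the dummy inputs produced above carry a global_attention_mask that marks every
# second token as global, which is what the ONNX export then traces.
#
#     from transformers import AutoConfig, AutoTokenizer
#     config = AutoConfig.from_pretrained("allenai/longformer-base-4096")
#     onnx_config = LongformerOnnxConfig(config, task="default")
#     tokenizer = AutoTokenizer.from_pretrained("allenai/longformer-base-4096")
#     dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
#     print(dummy["global_attention_mask"][0, :8])  # tensor([1, 0, 1, 0, 1, 0, 1, 0])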
| 188
|
"""simple docstring"""
def solution(limit: int = 1_000_000) -> int:
    """Sum Euler's totient phi(d) for 2 <= d <= limit via a sieve."""
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime, so adjust its multiples
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])
if __name__ == "__main__":
print(solution())
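# Sanity check (the Project Euler 72 example): for d <= 8 there are 21 reduced
# proper fractions, i.e. phi(2)+...+phi(8) = 1+2+2+4+2+6+4 = 21.
if __name__ == "__main__":
    assert solution(8) == 21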
| 188
| 1
|
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest(TestCase):
    def setUp(self) -> None:
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5
        # Realm tok
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'test',
'question',
'this',
'is',
'the',
'first',
'second',
'third',
'fourth',
'fifth',
'record',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        realm_tokenizer_path = os.path.join(self.tmpdirname, 'realm_tokenizer')
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
        realm_block_records_path = os.path.join(self.tmpdirname, 'realm_block_records')
        os.makedirs(realm_block_records_path, exist_ok=True)
    def get_tokenizer(self) -> RealmTokenizer:
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, 'realm_tokenizer'))
    def tearDown(self) -> None:
        shutil.rmtree(self.tmpdirname)
    def get_config(self):
        config = RealmConfig(num_block_records=self.num_block_records)
        return config
    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
{
'id': ['0', '1'],
'question': ['foo', 'bar'],
'answers': [['Foo', 'Bar'], ['Bar']],
} )
return dataset
    def get_dummy_block_records(self):
        block_records = np.array(
[
b'This is the first record',
b'This is the second record',
b'This is the third record',
b'This is the fourth record',
b'This is the fifth record',
b'This is a longer longer longer record',
            ],
            dtype=object,
        )
return block_records
    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
        return retriever
    def test_retrieve(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer
        retrieved_block_ids = np.array([0, 3], dtype='long')
        question_input_ids = tokenizer(['Test question']).input_ids
        answer_ids = tokenizer(
            ['the fourth'], add_special_tokens=False, return_token_type_ids=False, return_attention_mask=False, ).input_ids
        max_length = config.reader_seq_len
        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors='np')
        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'first', 'record', '[SEP]'] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'fourth', 'record', '[SEP]'] , )
    def test_block_has_answer(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer
        retrieved_block_ids = np.array([0, 3, 5], dtype='long')
        question_input_ids = tokenizer(['Test question']).input_ids
        answer_ids = tokenizer(
            ['the fourth', 'longer longer'], add_special_tokens=False, return_token_type_ids=False, return_attention_mask=False, ).input_ids
        max_length = config.reader_seq_len
        has_answers, start_pos, end_pos, _ = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors='np')
        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)
    def test_save_load_pretrained(self):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, 'realm_block_records'))
        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, 'realm_block_records'))
        self.assertEqual(retriever.block_records[0], b'This is the first record')
        # Test mocked remote path
        with patch('transformers.models.realm.retrieval_realm.hf_hub_download') as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, 'realm_block_records'), _REALM_BLOCK_RECORDS_FILENAME)
            retriever = RealmRetriever.from_pretrained('google/realm-cc-news-pretrained-openqa')
self.assertEqual(retriever.block_records[0] , b'This is the first record' )
| 196
|
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
_lowerCAmelCase : Optional[Any] = False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass
@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained('shi-labs/versatile-diffusion')
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        image_prompt = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg')
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type='numpy', ).images
        image_slice = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 218
| 0
|
'''simple docstring'''
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    """Launch a training function on TPU, multi-GPU, one GPU or CPU from a notebook."""
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())
    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            F"""Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}.""" )
    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
"your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
        if num_processes is None:
            num_processes = 8
        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(F"""Launching a training on {num_processes} TPU cores.""")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on one CPU." )
        function(*args)
else:
if num_processes is None:
raise ValueError(
"You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call." )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
"inside your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
if torch.cuda.is_initialized():
raise ValueError(
"To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
"using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
"function." )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.01", master_port=use_port, mixed_precision=mixed_precision ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")
                print(F"""Launching training on {num_processes} GPUs.""")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
"CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
"This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
"Please review your imports and test them when running the `notebook_launcher()` to identify "
"which one is problematic." ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
        if is_mps_available():
            os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
print("Launching training on MPS." )
elif torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on CPU." )
        function(*args)
def debug_launcher(function, args=(), num_processes=2):
    """Launch a training function using several processes on CPU for debugging purposes."""
    from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes, master_addr="127.0.01", master_port="29500", accelerate_mixed_precision="no", accelerate_debug_rdv_file=tmp_file.name, accelerate_use_cpu="yes", ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
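# A hedged usage sketch (the training function below is hypothetical): in a
# Colab/Kaggle notebook you define your loop and hand it to the launcher.
#
#     def training_loop(lr=1e-4):
#         ...  # build model, dataloaders, Accelerator, etc.
#
#     notebook_launcher(training_loop, args=(3e-4,), num_processes=2)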
| 354
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
    is_bs4_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
    is_torch_bf16_cpu_available,
    is_torch_bf16_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
WEIGHTS_NAME = '''pytorch_model.bin'''
WEIGHTS_INDEX_NAME = '''pytorch_model.bin.index.json'''
ADAPTER_CONFIG_NAME = '''adapter_config.json'''
ADAPTER_WEIGHTS_NAME = '''adapter_model.bin'''
ADAPTER_SAFE_WEIGHTS_NAME = '''adapter_model.safetensors'''
TF2_WEIGHTS_NAME = '''tf_model.h5'''
TF2_WEIGHTS_INDEX_NAME = '''tf_model.h5.index.json'''
TF_WEIGHTS_NAME = '''model.ckpt'''
FLAX_WEIGHTS_NAME = '''flax_model.msgpack'''
FLAX_WEIGHTS_INDEX_NAME = '''flax_model.msgpack.index.json'''
SAFE_WEIGHTS_NAME = '''model.safetensors'''
SAFE_WEIGHTS_INDEX_NAME = '''model.safetensors.index.json'''
CONFIG_NAME = '''config.json'''
FEATURE_EXTRACTOR_NAME = '''preprocessor_config.json'''
IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
GENERATION_CONFIG_NAME = '''generation_config.json'''
MODEL_CARD_NAME = '''modelcard.json'''
SENTENCEPIECE_UNDERLINE = '''▁'''
SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE  # Kept for backward compatibility
MULTIPLE_CHOICE_DUMMY_INPUTS = [
    [[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2  # Needs to have 0s and 1s only since XLM uses it for langs too.
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version):
    """Raise an ImportError if the installed transformers version is older than `min_version`."""
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = F"""This example requires a minimum version of {min_version},"""
        error_message += F""" but the version found is {__version__}.\n"""
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers." )
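# A minimal usage sketch (the version string is illustrative): example scripts
# call this helper at import time to fail fast on an incompatible install.
#
#     check_min_version("4.21.0")  # raises ImportError if transformers is older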
| 142
| 0
|
alphabet_size = 256
# Modulus to hash a string
modulus = 1000003


def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs in `text`, using Rabin-Karp rolling hashes."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
def test_rabin_karp() -> None:
    # Test 1)
    pattern = """abc1abc12"""
    text1 = """alskfjaldsabc1abc1abc12k23adsfabcabc"""
    text2 = """alskfjaldsk23adsfabcabc"""
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)
    # Test 2)
    pattern = """ABABX"""
    text = """ABABZABABYABABX"""
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = """AAAB"""
    text = """ABAAAAAB"""
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = """abcdabcy"""
    text = """abcxabcdabxabcdabcdabcy"""
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = """Lü"""
    text = """Lüsai"""
    assert rabin_karp(pattern, text)
    pattern = """Lue"""
    assert not rabin_karp(pattern, text)
    print("""Success.""")
if __name__ == "__main__":
test_rabin_karp()
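# Why the rolling update is O(1) per shift (a small check, not part of the
# algorithm): dropping the leading character and appending the next one
# reproduces the hash computed from scratch.
if __name__ == "__main__":
    h_abc = (ord("a") * alphabet_size**2 + ord("b") * alphabet_size + ord("c")) % modulus
    rolled = ((h_abc - ord("a") * alphabet_size**2) * alphabet_size + ord("d")) % modulus
    h_bcd = (ord("b") * alphabet_size**2 + ord("c") * alphabet_size + ord("d")) % modulus
    assert rolled == h_bcd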
| 345
|
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["""adapt""", """re@@""", """a@@""", """apt""", """c@@""", """t""", """<unk>"""]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["""#version: 0.2""", """a p""", """ap t</w>""", """r e""", """a d""", """ad apt</w>""", """"""]
        self.special_tokens_map = {"""unk_token""": """<unk>"""}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""merges_file"""])
        with open(self.vocab_file, """w""", encoding="""utf-8""") as fp:
            fp.write(json.dumps(vocab_tokens) + """\n""")
        with open(self.merges_file, """w""", encoding="""utf-8""") as fp:
            fp.write("""\n""".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = """adapt react readapt apt"""
        output_text = """adapt react readapt apt"""
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = """adapt react readapt apt"""
        bpe_tokens = """adapt re@@ a@@ c@@ t re@@ adapt apt""".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 345
| 1
|
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
UpperCAmelCase = ''' \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, '''schedulers/'''))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, '''src/diffusers/schedulers/scheduling_ddpm.py'''), os.path.join(self.diffusers_dir, '''schedulers/scheduling_ddpm.py'''), )
    def tearDown(self):
        check_copies.DIFFUSERS_PATH = '''src/diffusers'''
        shutil.rmtree(self.diffusers_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
        if overwrite_result is not None:
            expected = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, '''new_code.py''')
        with open(fname, '''w''', newline='''\n''') as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, '''r''') as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''')
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''', '''DDPMSchedulerOutput''', REFERENCE_CODE + '''\n''', )
        # With no empty line at the end
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''', '''DDPMSchedulerOutput''', REFERENCE_CODE, )
        # Copy consistency with rename
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''', '''TestSchedulerOutput''', re.sub('''DDPM''', '''Test''', REFERENCE_CODE), )
        # Copy consistency with a really long name
        long_class_name = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
        self.check_copy_consistency(
            f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""", f"""{long_class_name}SchedulerOutput""", re.sub('''DDPM''', long_class_name, REFERENCE_CODE), )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''', '''TestSchedulerOutput''', REFERENCE_CODE, overwrite_result=re.sub('''DDPM''', '''Test''', REFERENCE_CODE), )
| 172
|
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, F"""{torch_layer} layer.weight does not match"""
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, F"""{torch_layer} layer.bias does not match"""
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])
    set_param(
        torch_layer.self_attention.query_key, torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.output.dense, torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1), )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])
    set_param(
        torch_layer.self_attention.query, torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.key, torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.output.dense, torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1), )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm, torch.tensor(layer_norm_1_weight), torch.tensor(layer_norm_1_bias), )
    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)
    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]
    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]
    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm, torch.tensor(layer_norm_2_weight), torch.tensor(layer_norm_2_bias), )
    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense, torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(), torch.tensor(inter_dense_bias), )
    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense, torch.tensor(out_dense_weight).transpose(0, 1).contiguous(), torch.tensor(out_dense_bias), )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer
    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings, torch.tensor(word_embeddings), )
    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), F"""{position_embeddings[emb_idx]} emb does not match"""
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))
    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)
    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm, torch.tensor(layer_norm_out_weight), torch.tensor(layer_norm_out_bias), )
    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder, torch.tensor(output_embed_weights).transpose(0, 1).contiguous(), torch.tensor(output_embed_bias), )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(F"""Building PyTorch model from configuration: {config}""")
    model = ReformerModelWithLMHead(config)
    with open(trax_model_pkl_path, '''rb''') as f:
        model_weights = pickle.load(f)['''weights''']
    set_model_weights_in_torch(model_weights, model, config.hidden_size)
    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
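# A hedged usage sketch (the script and file names below are illustrative):
#
#     python convert_reformer_trax_checkpoint_to_pytorch.py \
#         --trax_model_pkl_path model.pkl \
#         --config_file config.json \
#         --pytorch_dump_path pytorch_model.bin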
| 172
| 1
|
"""simple docstring"""
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("""0.8.3"""):
raise Exception("""requires gluonnlp == 0.8.3""")
if version.parse(mx.__version__) != version.parse("""1.5.0"""):
raise Exception("""requires mxnet == 1.5.0""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = """The Nymphenburg Palace is a beautiful palace in Munich!"""
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Convert the original Bort checkpoint (GluonNLP/MXNet) to a HuggingFace PyTorch checkpoint."""
    bort_4_8_768_1024_hparams = {
'''attention_cell''': '''multi_head''',
'''num_layers''': 4,
'''units''': 10_24,
'''hidden_size''': 7_68,
'''max_length''': 5_12,
'''num_heads''': 8,
'''scaled''': True,
'''dropout''': 0.1,
'''use_residual''': True,
'''embed_size''': 10_24,
'''embed_dropout''': 0.1,
'''word_embed''': None,
'''layer_norm_eps''': 1e-5,
'''token_type_vocab_size''': 2,
}
    predefined_args = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args['''attention_cell'''], num_layers=predefined_args['''num_layers'''], units=predefined_args['''units'''], hidden_size=predefined_args['''hidden_size'''], max_length=predefined_args['''max_length'''], num_heads=predefined_args['''num_heads'''], scaled=predefined_args['''scaled'''], dropout=predefined_args['''dropout'''], output_attention=False, output_all_encodings=False, use_residual=predefined_args['''use_residual'''], activation=predefined_args.get('''activation''', '''gelu'''), layer_norm_eps=predefined_args.get('''layer_norm_eps''', None), )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = '''openwebtext_ccnews_stories_books_cased'''
    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), '''models''')
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)
    original_bort = nlp.model.BERTModel(
        encoder, len(bort_vocab), units=predefined_args['''units'''], embed_size=predefined_args['''embed_size'''], embed_dropout=predefined_args['''embed_dropout'''], word_embed=predefined_args['''word_embed'''], use_pooler=False, use_token_type_embed=False, token_type_vocab_size=predefined_args['''token_type_vocab_size'''], use_classifier=False, use_decoder=False, )
    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()
# Build our config 🤗
    hf_bort_config_json = {
'''architectures''': ['''BertForMaskedLM'''],
'''attention_probs_dropout_prob''': predefined_args['''dropout'''],
'''hidden_act''': '''gelu''',
'''hidden_dropout_prob''': predefined_args['''dropout'''],
'''hidden_size''': predefined_args['''embed_size'''],
'''initializer_range''': 0.02,
'''intermediate_size''': predefined_args['''hidden_size'''],
'''layer_norm_eps''': predefined_args['''layer_norm_eps'''],
'''max_position_embeddings''': predefined_args['''max_length'''],
'''model_type''': '''bort''',
'''num_attention_heads''': predefined_args['''num_heads'''],
'''num_hidden_layers''': predefined_args['''num_layers'''],
'''pad_token_id''': 1, # 2 = BERT, 1 = RoBERTa
'''type_vocab_size''': 1, # 2 = BERT, 1 = RoBERTa
        '''vocab_size''': len(bort_vocab),
    }
    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))
# Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape
        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape
        assert (
            shape_hf == shape_gluon
        ), f'The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'
        return gluon_param
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, '''word_embed.0.weight''')
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, '''encoder.position_weight''')
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, '''encoder.layer_norm.beta''')
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, '''encoder.layer_norm.gamma''')
    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data)
    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]
        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f'encoder.transformer_cells.{i}.attention_cell.proj_key.bias')
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f'encoder.transformer_cells.{i}.attention_cell.proj_key.weight')
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f'encoder.transformer_cells.{i}.attention_cell.proj_query.bias')
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f'encoder.transformer_cells.{i}.attention_cell.proj_query.weight')
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f'encoder.transformer_cells.{i}.attention_cell.proj_value.bias')
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f'encoder.transformer_cells.{i}.attention_cell.proj_value.weight')
        # self attention output
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f'encoder.transformer_cells.{i}.proj.bias')
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f'encoder.transformer_cells.{i}.proj.weight')
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f'encoder.transformer_cells.{i}.layer_norm.beta')
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f'encoder.transformer_cells.{i}.layer_norm.gamma')
        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f'encoder.transformer_cells.{i}.ffn.ffn_1.bias')
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f'encoder.transformer_cells.{i}.ffn.ffn_1.weight')
        # output
        bert_output: BertOutput = layer.output
        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f'encoder.transformer_cells.{i}.ffn.ffn_2.bias')
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f'encoder.transformer_cells.{i}.ffn.ffn_2.weight')
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f'encoder.transformer_cells.{i}.ffn.layer_norm.beta')
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f'encoder.transformer_cells.{i}.ffn.layer_norm.gamma')
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained('''roberta-base''')
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)['''input_ids''']
    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])
    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors='''pt''')
    output_hf = hf_bort_model(**input_ids)[0]
    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()
    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)
    if success:
        print('''✔️ Both model do output the same tensors''')
    else:
        print('''❌ Both model do **NOT** output the same tensors''')
        print('''Absolute difference is:''', max_absolute_diff)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--bort_checkpoint_path""", default=None, type=str, required=True, help="""Path the official Bort params file."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 91
|
"""simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)
    def get_scheduler_config(self, **kwargs):
        config = {
            '''num_train_timesteps''': 1000,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
        }
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('''num_inference_steps''', None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : List[str]=0 , **lowercase_ : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = dict(self.forward_default_kwargs)
SCREAMING_SNAKE_CASE_ : Optional[Any] = kwargs.pop('''num_inference_steps''' , lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = self.dummy_sample
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0.1 * sample
SCREAMING_SNAKE_CASE_ : int = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE_ : Dict = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = scheduler_class(**lowercase_)
scheduler.set_timesteps(lowercase_)
# copy over dummy past residuals (must be after setting timesteps)
SCREAMING_SNAKE_CASE_ : Dict = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase_)
SCREAMING_SNAKE_CASE_ : str = scheduler_class.from_pretrained(lowercase_)
# copy over dummy past residuals
new_scheduler.set_timesteps(lowercase_)
# copy over dummy past residual (must be after setting timesteps)
SCREAMING_SNAKE_CASE_ : Any = dummy_past_residuals[:]
SCREAMING_SNAKE_CASE_ : Optional[int] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample
SCREAMING_SNAKE_CASE_ : Optional[Any] = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
SCREAMING_SNAKE_CASE_ : List[str] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample
SCREAMING_SNAKE_CASE_ : Tuple = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def _SCREAMING_SNAKE_CASE ( self : str , **lowercase_ : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_scheduler_config(**lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = scheduler_class(**lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = 10
SCREAMING_SNAKE_CASE_ : List[Any] = self.dummy_model()
SCREAMING_SNAKE_CASE_ : str = self.dummy_sample_deter
scheduler.set_timesteps(lowercase_)
for i, t in enumerate(scheduler.prk_timesteps):
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : str = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_).prev_sample
for i, t in enumerate(scheduler.plms_timesteps):
SCREAMING_SNAKE_CASE_ : List[Any] = model(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_).prev_sample
return sample
def _SCREAMING_SNAKE_CASE ( self : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = dict(self.forward_default_kwargs)
SCREAMING_SNAKE_CASE_ : Dict = kwargs.pop('''num_inference_steps''' , lowercase_)
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE_ : Tuple = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ : Optional[Any] = scheduler_class(**lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = self.dummy_sample
SCREAMING_SNAKE_CASE_ : Any = 0.1 * sample
if num_inference_steps is not None and hasattr(lowercase_ , '''set_timesteps'''):
scheduler.set_timesteps(lowercase_)
elif num_inference_steps is not None and not hasattr(lowercase_ , '''set_timesteps'''):
SCREAMING_SNAKE_CASE_ : Optional[Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
SCREAMING_SNAKE_CASE_ : str = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
SCREAMING_SNAKE_CASE_ : Optional[int] = dummy_past_residuals[:]
SCREAMING_SNAKE_CASE_ : Dict = scheduler.step_prk(lowercase_ , 0 , lowercase_ , **lowercase_).prev_sample
SCREAMING_SNAKE_CASE_ : List[Any] = scheduler.step_prk(lowercase_ , 1 , lowercase_ , **lowercase_).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
SCREAMING_SNAKE_CASE_ : Optional[int] = scheduler.step_plms(lowercase_ , 0 , lowercase_ , **lowercase_).prev_sample
SCREAMING_SNAKE_CASE_ : Any = scheduler.step_plms(lowercase_ , 1 , lowercase_ , **lowercase_).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Dict):
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ : List[str] = self.get_scheduler_config(steps_offset=1)
SCREAMING_SNAKE_CASE_ : Tuple = scheduler_class(**lowercase_)
scheduler.set_timesteps(10)
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]) , )
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
for beta_start, beta_end in zip([0.00_01, 0.0_01] , [0.0_02, 0.02]):
self.check_over_configs(beta_start=lowercase_ , beta_end=lowercase_)
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase_)
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
for t in [1, 5, 10]:
self.check_over_forward(time_step=lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100]):
self.check_over_forward(num_inference_steps=lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 27
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE_ : List[Any] = self.dummy_sample
SCREAMING_SNAKE_CASE_ : str = 0.1 * sample
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ : Optional[int] = scheduler_class(**lowercase_)
scheduler.set_timesteps(lowercase_)
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2]):
SCREAMING_SNAKE_CASE_ : int = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_).prev_sample
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
with self.assertRaises(lowercase_):
SCREAMING_SNAKE_CASE_ : int = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ : List[str] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ : Dict = scheduler_class(**lowercase_)
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample).prev_sample
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = self.full_loop()
SCREAMING_SNAKE_CASE_ : List[Any] = torch.sum(torch.abs(lowercase_))
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.mean(torch.abs(lowercase_))
assert abs(result_sum.item() - 1_98.13_18) < 1e-2
assert abs(result_mean.item() - 0.25_80) < 1e-3
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = self.full_loop(prediction_type='''v_prediction''')
SCREAMING_SNAKE_CASE_ : str = torch.sum(torch.abs(lowercase_))
SCREAMING_SNAKE_CASE_ : Any = torch.mean(torch.abs(lowercase_))
assert abs(result_sum.item() - 67.39_86) < 1e-2
assert abs(result_mean.item() - 0.08_78) < 1e-3
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01)
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.sum(torch.abs(lowercase_))
SCREAMING_SNAKE_CASE_ : Any = torch.mean(torch.abs(lowercase_))
assert abs(result_sum.item() - 2_30.03_99) < 1e-2
assert abs(result_mean.item() - 0.29_95) < 1e-3
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01)
SCREAMING_SNAKE_CASE_ : int = torch.sum(torch.abs(lowercase_))
SCREAMING_SNAKE_CASE_ : List[str] = torch.mean(torch.abs(lowercase_))
assert abs(result_sum.item() - 1_86.94_82) < 1e-2
assert abs(result_mean.item() - 0.24_34) < 1e-3
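# A minimal standalone sketch of the PRK/PLMS loop that full_loop above
# exercises. Assumptions: a random 4x8x8 latent and a fixed 0.1 * sample
# residual stand in for a real UNet; shapes and step count are illustrative.
import torch
from diffusers import PNDMScheduler

scheduler = PNDMScheduler(num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear")
scheduler.set_timesteps(10)
sample = torch.randn(1, 4, 8, 8)
for t in scheduler.prk_timesteps:  # Runge-Kutta warm-up steps
    residual = 0.1 * sample  # placeholder for model(sample, t)
    sample = scheduler.step_prk(residual, t, sample).prev_sample
for t in scheduler.plms_timesteps:  # linear multistep phase
    residual = 0.1 * sample
    sample = scheduler.step_plms(residual, t, sample).prev_sample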
| 91
| 1
|
_A : List[Any] = [
'DownloadConfig',
'DownloadManager',
'DownloadMode',
'StreamingDownloadManager',
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 265
|
def _a ( UpperCAmelCase ) -> bool:
"""simple docstring"""
return str(UpperCAmelCase ) == str(UpperCAmelCase )[::-1]
def _a ( UpperCAmelCase ) -> int:
"""simple docstring"""
return int(UpperCAmelCase ) + int(str(UpperCAmelCase )[::-1] )
def _a ( UpperCAmelCase = 10000 ) -> int:
"""simple docstring"""
lowerCamelCase__ : Tuple = []
for num in range(1 , UpperCAmelCase ):
lowerCamelCase__ : List[str] = 0
lowerCamelCase__ : Union[str, Any] = num
while iterations < 50:
lowerCamelCase__ : Dict = sum_reverse(UpperCAmelCase )
iterations += 1
if is_palindrome(UpperCAmelCase ):
break
else:
lychrel_nums.append(UpperCAmelCase )
return len(UpperCAmelCase )
if __name__ == "__main__":
print(F'''{solution() = }''')
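# A small self-contained check of the reverse-and-add idea used above (it does
# not depend on the obfuscated names): 349 reaches the palindrome 7337 after
# three iterations, while 196 is the classic candidate that never does within 50.
n = 349
for _ in range(3):
    n += int(str(n)[::-1])  # 349 -> 1292 -> 4213 -> 7337
assert n == 7337 and str(n) == str(n)[::-1]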
| 265
| 1
|
def UpperCAmelCase__ ( ):
'''simple docstring'''
return [
a * b * (10_00 - a - b)
for a in range(1 , 9_99 )
        for b in range(a , 9_99 )
if (a * a + b * b == (10_00 - a - b) ** 2)
][0]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 188
|
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
lowerCamelCase = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase__ )
class __magic_name__ ( lowerCamelCase__ ):
'''simple docstring'''
def __init__( self, **lowercase_ ) -> Dict:
"""simple docstring"""
super().__init__(**lowercase_ )
if self.framework != "pt":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" )
# No specific FOR_XXX available yet
def __call__( self, lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
return super().__call__(lowercase_, **lowercase_ )
def _UpperCAmelCase ( self, **lowercase_ ) -> int:
"""simple docstring"""
a__ ={}
if "candidate_labels" in kwargs:
a__ =kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
a__ =kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def _UpperCAmelCase ( self, lowercase_, lowercase_=None, lowercase_="This is a sound of {}." ) -> Union[str, Any]:
"""simple docstring"""
if isinstance(lowercase_, lowercase_ ):
if audio.startswith('''http://''' ) or audio.startswith('''https://''' ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
a__ =requests.get(lowercase_ ).content
else:
with open(lowercase_, '''rb''' ) as f:
a__ =f.read()
if isinstance(lowercase_, lowercase_ ):
a__ =ffmpeg_read(lowercase_, self.feature_extractor.sampling_rate )
if not isinstance(lowercase_, np.ndarray ):
raise ValueError('''We expect a numpy ndarray as input''' )
if len(audio.shape ) != 1:
raise ValueError('''We expect a single channel audio input for ZeroShotAudioClassificationPipeline''' )
a__ =self.feature_extractor(
[audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors='''pt''' )
a__ =candidate_labels
a__ =[hypothesis_template.format(lowercase_ ) for x in candidate_labels]
a__ =self.tokenizer(lowercase_, return_tensors=self.framework, padding=lowercase_ )
a__ =[text_inputs]
return inputs
def _UpperCAmelCase ( self, lowercase_ ) -> str:
"""simple docstring"""
a__ =model_inputs.pop('''candidate_labels''' )
a__ =model_inputs.pop('''text_inputs''' )
if isinstance(text_inputs[0], lowercase_ ):
a__ =text_inputs[0]
else:
# Batching case.
a__ =text_inputs[0][0]
a__ =self.model(**lowercase_, **lowercase_ )
a__ ={
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_audio,
}
return model_outputs
def _UpperCAmelCase ( self, lowercase_ ) -> Any:
"""simple docstring"""
a__ =model_outputs.pop('''candidate_labels''' )
a__ =model_outputs['''logits'''][0]
if self.framework == "pt":
a__ =logits.softmax(dim=0 )
a__ =probs.tolist()
else:
raise ValueError('''`tf` framework not supported.''' )
a__ =[
{'''score''': score, '''label''': candidate_label}
            for score, candidate_label in sorted(zip(lowercase_, lowercase_ ), key=lambda x : -x[0] )
]
return result
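# A hedged usage sketch for the pipeline defined above. The checkpoint name is
# an assumption (CLAP-style checkpoints are the usual backbone for zero-shot
# audio classification), and the .wav path is a placeholder.
from transformers import pipeline

classifier = pipeline("zero-shot-audio-classification", model="laion/clap-htsat-unfused")
predictions = classifier("dog_bark.wav", candidate_labels=["dog barking", "vacuum cleaner"])
# -> [{"score": ..., "label": "dog barking"}, {"score": ..., "label": "vacuum cleaner"}], sorted by score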
| 188
| 1
|
"""simple docstring"""
def _snake_case ( UpperCamelCase : int ):
UpperCAmelCase : Tuple = int(UpperCamelCase )
if n_element < 1:
        UpperCAmelCase : Tuple = ValueError("""n_element should be a positive number""" )
raise my_error
UpperCAmelCase : Tuple = [1]
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = (0, 0, 0)
UpperCAmelCase : Any = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
A: int = input("Enter the last number (nth term) of the Hamming Number Series: ")
print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
A: List[str] = hamming(int(n))
print("-----------------------------------------------------")
print(f"""The list with nth numbers is: {hamming_numbers}""")
print("-----------------------------------------------------")
| 76
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
UpperCAmelCase : List[str] = {
"""task_specific_params""": {
"""summarization""": {"""length_penalty""": 1.0, """max_length""": 128, """min_length""": 12, """num_beams""": 4},
"""summarization_cnn""": {"""length_penalty""": 2.0, """max_length""": 142, """min_length""": 56, """num_beams""": 4},
"""summarization_xsum""": {"""length_penalty""": 1.0, """max_length""": 62, """min_length""": 11, """num_beams""": 6},
}
}
UpperCAmelCase : Optional[Any] = {
"""task_specific_params.summarization.length_penalty""": 1.0,
"""task_specific_params.summarization.max_length""": 128,
"""task_specific_params.summarization.min_length""": 12,
"""task_specific_params.summarization.num_beams""": 4,
"""task_specific_params.summarization_cnn.length_penalty""": 2.0,
"""task_specific_params.summarization_cnn.max_length""": 142,
"""task_specific_params.summarization_cnn.min_length""": 56,
"""task_specific_params.summarization_cnn.num_beams""": 4,
"""task_specific_params.summarization_xsum.length_penalty""": 1.0,
"""task_specific_params.summarization_xsum.max_length""": 62,
"""task_specific_params.summarization_xsum.min_length""": 11,
"""task_specific_params.summarization_xsum.num_beams""": 6,
}
self.assertEqual(flatten_dict(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
UpperCAmelCase : str = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(_SCREAMING_SNAKE_CASE ) , x.transpose() ) )
UpperCAmelCase : List[Any] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(_SCREAMING_SNAKE_CASE , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : Optional[int] = np.random.randn(3 , 4 )
UpperCAmelCase : Tuple = torch.tensor(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(transpose(_SCREAMING_SNAKE_CASE ) , transpose(_SCREAMING_SNAKE_CASE ).numpy() ) )
UpperCAmelCase : List[str] = np.random.randn(3 , 4 , 5 )
UpperCAmelCase : List[str] = torch.tensor(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(transpose(_SCREAMING_SNAKE_CASE , axes=(1, 2, 0) ) , transpose(_SCREAMING_SNAKE_CASE , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
UpperCAmelCase : Optional[int] = np.random.randn(3 , 4 )
UpperCAmelCase : Dict = tf.constant(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(transpose(_SCREAMING_SNAKE_CASE ) , transpose(_SCREAMING_SNAKE_CASE ).numpy() ) )
UpperCAmelCase : List[Any] = np.random.randn(3 , 4 , 5 )
UpperCAmelCase : List[Any] = tf.constant(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(transpose(_SCREAMING_SNAKE_CASE , axes=(1, 2, 0) ) , transpose(_SCREAMING_SNAKE_CASE , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : List[Any] = np.random.randn(3 , 4 )
UpperCAmelCase : Union[str, Any] = jnp.array(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(transpose(_SCREAMING_SNAKE_CASE ) , np.asarray(transpose(_SCREAMING_SNAKE_CASE ) ) ) )
UpperCAmelCase : Dict = np.random.randn(3 , 4 , 5 )
UpperCAmelCase : int = jnp.array(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(transpose(_SCREAMING_SNAKE_CASE , axes=(1, 2, 0) ) , np.asarray(transpose(_SCREAMING_SNAKE_CASE , axes=(1, 2, 0) ) ) ) )
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : str = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(_SCREAMING_SNAKE_CASE , (4, 3) ) , np.reshape(_SCREAMING_SNAKE_CASE , (4, 3) ) ) )
UpperCAmelCase : Any = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(_SCREAMING_SNAKE_CASE , (12, 5) ) , np.reshape(_SCREAMING_SNAKE_CASE , (12, 5) ) ) )
@require_torch
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : int = np.random.randn(3 , 4 )
UpperCAmelCase : str = torch.tensor(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(reshape(_SCREAMING_SNAKE_CASE , (4, 3) ) , reshape(_SCREAMING_SNAKE_CASE , (4, 3) ).numpy() ) )
UpperCAmelCase : Dict = np.random.randn(3 , 4 , 5 )
UpperCAmelCase : Tuple = torch.tensor(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(reshape(_SCREAMING_SNAKE_CASE , (12, 5) ) , reshape(_SCREAMING_SNAKE_CASE , (12, 5) ).numpy() ) )
@require_tf
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : int = np.random.randn(3 , 4 )
UpperCAmelCase : Union[str, Any] = tf.constant(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(reshape(_SCREAMING_SNAKE_CASE , (4, 3) ) , reshape(_SCREAMING_SNAKE_CASE , (4, 3) ).numpy() ) )
UpperCAmelCase : Optional[Any] = np.random.randn(3 , 4 , 5 )
UpperCAmelCase : List[Any] = tf.constant(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(reshape(_SCREAMING_SNAKE_CASE , (12, 5) ) , reshape(_SCREAMING_SNAKE_CASE , (12, 5) ).numpy() ) )
@require_flax
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
UpperCAmelCase : Optional[Any] = np.random.randn(3 , 4 )
UpperCAmelCase : Union[str, Any] = jnp.array(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(reshape(_SCREAMING_SNAKE_CASE , (4, 3) ) , np.asarray(reshape(_SCREAMING_SNAKE_CASE , (4, 3) ) ) ) )
UpperCAmelCase : List[str] = np.random.randn(3 , 4 , 5 )
UpperCAmelCase : List[str] = jnp.array(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(reshape(_SCREAMING_SNAKE_CASE , (12, 5) ) , np.asarray(reshape(_SCREAMING_SNAKE_CASE , (12, 5) ) ) ) )
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
UpperCAmelCase : List[str] = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(_SCREAMING_SNAKE_CASE ) , np.squeeze(_SCREAMING_SNAKE_CASE ) ) )
UpperCAmelCase : Union[str, Any] = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(_SCREAMING_SNAKE_CASE , axis=2 ) , np.squeeze(_SCREAMING_SNAKE_CASE , axis=2 ) ) )
@require_torch
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
UpperCAmelCase : List[Any] = np.random.randn(1 , 3 , 4 )
UpperCAmelCase : List[str] = torch.tensor(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(squeeze(_SCREAMING_SNAKE_CASE ) , squeeze(_SCREAMING_SNAKE_CASE ).numpy() ) )
UpperCAmelCase : int = np.random.randn(1 , 4 , 1 , 5 )
UpperCAmelCase : Tuple = torch.tensor(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(squeeze(_SCREAMING_SNAKE_CASE , axis=2 ) , squeeze(_SCREAMING_SNAKE_CASE , axis=2 ).numpy() ) )
@require_tf
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
UpperCAmelCase : int = np.random.randn(1 , 3 , 4 )
UpperCAmelCase : Dict = tf.constant(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(squeeze(_SCREAMING_SNAKE_CASE ) , squeeze(_SCREAMING_SNAKE_CASE ).numpy() ) )
UpperCAmelCase : Dict = np.random.randn(1 , 4 , 1 , 5 )
UpperCAmelCase : List[str] = tf.constant(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(squeeze(_SCREAMING_SNAKE_CASE , axis=2 ) , squeeze(_SCREAMING_SNAKE_CASE , axis=2 ).numpy() ) )
@require_flax
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : int = np.random.randn(1 , 3 , 4 )
UpperCAmelCase : Optional[Any] = jnp.array(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(squeeze(_SCREAMING_SNAKE_CASE ) , np.asarray(squeeze(_SCREAMING_SNAKE_CASE ) ) ) )
UpperCAmelCase : Optional[Any] = np.random.randn(1 , 4 , 1 , 5 )
UpperCAmelCase : Optional[Any] = jnp.array(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(squeeze(_SCREAMING_SNAKE_CASE , axis=2 ) , np.asarray(squeeze(_SCREAMING_SNAKE_CASE , axis=2 ) ) ) )
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : Dict = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(_SCREAMING_SNAKE_CASE , axis=1 ) , np.expand_dims(_SCREAMING_SNAKE_CASE , axis=1 ) ) )
@require_torch
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
UpperCAmelCase : Tuple = np.random.randn(3 , 4 )
UpperCAmelCase : str = torch.tensor(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(expand_dims(_SCREAMING_SNAKE_CASE , axis=1 ) , expand_dims(_SCREAMING_SNAKE_CASE , axis=1 ).numpy() ) )
@require_tf
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : int = np.random.randn(3 , 4 )
UpperCAmelCase : List[str] = tf.constant(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(expand_dims(_SCREAMING_SNAKE_CASE , axis=1 ) , expand_dims(_SCREAMING_SNAKE_CASE , axis=1 ).numpy() ) )
@require_flax
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : Optional[int] = np.random.randn(3 , 4 )
UpperCAmelCase : Optional[Any] = jnp.array(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(expand_dims(_SCREAMING_SNAKE_CASE , axis=1 ) , np.asarray(expand_dims(_SCREAMING_SNAKE_CASE , axis=1 ) ) ) )
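# What these tests pin down, in one standalone sketch: transformers'
# framework-agnostic tensor utilities dispatch on the input type, so the same
# call works on NumPy arrays, torch/TF tensors, or JAX arrays (NumPy shown).
import numpy as np
from transformers.utils import expand_dims, reshape, squeeze, transpose

x = np.random.randn(1, 3, 4)
assert transpose(squeeze(x)).shape == (4, 3)      # squeeze -> (3, 4), transpose -> (4, 3)
assert reshape(x, (12,)).shape == (12,)
assert expand_dims(x, axis=0).shape == (1, 1, 3, 4)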
| 76
| 1
|
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase_ = logging.get_logger(__name__)
lowercase_ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
lowercase_ = {
"tokenizer_file": {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
},
}
lowercase_ = {
"gpt-neox-20b": 2_0_4_8,
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Any = VOCAB_FILES_NAMES
__UpperCAmelCase : int = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Dict = ['input_ids', 'attention_mask']
def __init__( self , _a=None , _a=None , _a=None , _a="<|endoftext|>" , _a="<|endoftext|>" , _a="<|endoftext|>" , _a=False , **_a , ):
super().__init__(
_a , _a , tokenizer_file=_a , unk_token=_a , bos_token=_a , eos_token=_a , add_prefix_space=_a , **_a , )
__a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , _a ) != add_prefix_space:
__a = getattr(_a , pre_tok_state.pop('''type''' ) )
__a = add_prefix_space
__a = pre_tok_class(**_a )
__a = add_prefix_space
def __UpperCAmelCase ( self , _a , _a = None ):
__a = self._tokenizer.model.save(_a , name=_a )
return tuple(_a )
def __UpperCAmelCase ( self , _a ):
__a = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_a , add_special_tokens=_a ) + [self.eos_token_id] )
if len(_a ) > self.model_max_length:
__a = input_ids[-self.model_max_length :]
return input_ids
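# A hedged usage sketch (the class above is the fast GPT-NeoX tokenizer; the
# checkpoint name comes from the PRETRAINED map defined earlier in this file,
# and the round trip is shown for illustration only).
from transformers import GPTNeoXTokenizerFast

tok = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
ids = tok("Hello world").input_ids
print(ids, tok.decode(ids))  # byte-level BPE, so decoding recovers the text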
| 45
|
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class __SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self : Dict , A : int , A : int , A : int , A : Union[str, Any]=0.0 , A : Optional[int] = None , A : str = "geglu" , A : Optional[int] = None , A : bool = False , A : bool = False , A : bool = False , A : bool = False , A : bool = True , A : str = "layer_norm" , A : bool = False , ) ->Any:
super().__init__()
lowerCamelCase__ : int = only_cross_attention
lowerCamelCase__ : Optional[Any] = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm_zero'''
lowerCamelCase__ : Union[str, Any] = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm'''
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
F"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
F" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}." )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
lowerCamelCase__ : Optional[Any] = AdaLayerNorm(A , A )
elif self.use_ada_layer_norm_zero:
lowerCamelCase__ : int = AdaLayerNormZero(A , A )
else:
lowerCamelCase__ : Dict = nn.LayerNorm(A , elementwise_affine=A )
lowerCamelCase__ : Any = Attention(
query_dim=A , heads=A , dim_head=A , dropout=A , bias=A , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=A , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
lowerCamelCase__ : Tuple = (
AdaLayerNorm(A , A )
if self.use_ada_layer_norm
else nn.LayerNorm(A , elementwise_affine=A )
)
lowerCamelCase__ : int = Attention(
query_dim=A , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=A , dim_head=A , dropout=A , bias=A , upcast_attention=A , ) # is self-attn if encoder_hidden_states is none
else:
lowerCamelCase__ : Dict = None
lowerCamelCase__ : Tuple = None
# 3. Feed-forward
lowerCamelCase__ : Optional[int] = nn.LayerNorm(A , elementwise_affine=A )
lowerCamelCase__ : Union[str, Any] = FeedForward(A , dropout=A , activation_fn=A , final_dropout=A )
# let chunk size default to None
lowerCamelCase__ : str = None
lowerCamelCase__ : Tuple = 0
def __lowerCamelCase ( self : Any , A : Optional[int] , A : int ) ->List[str]:
# Sets chunk feed-forward
lowerCamelCase__ : List[Any] = chunk_size
lowerCamelCase__ : List[str] = dim
def __lowerCamelCase ( self : str , A : torch.FloatTensor , A : Optional[torch.FloatTensor] = None , A : Optional[torch.FloatTensor] = None , A : Optional[torch.FloatTensor] = None , A : Optional[torch.LongTensor] = None , A : Dict[str, Any] = None , A : Optional[torch.LongTensor] = None , ) ->Tuple:
# Notice that normalization is always applied before the real computation in the following blocks.
# 1. Self-Attention
if self.use_ada_layer_norm:
lowerCamelCase__ : Union[str, Any] = self.norma(A , A )
elif self.use_ada_layer_norm_zero:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Any = self.norma(
A , A , A , hidden_dtype=hidden_states.dtype )
else:
lowerCamelCase__ : List[str] = self.norma(A )
lowerCamelCase__ : str = cross_attention_kwargs if cross_attention_kwargs is not None else {}
lowerCamelCase__ : Any = self.attna(
A , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=A , **A , )
if self.use_ada_layer_norm_zero:
lowerCamelCase__ : Any = gate_msa.unsqueeze(1 ) * attn_output
lowerCamelCase__ : Optional[int] = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
lowerCamelCase__ : int = (
self.norma(A , A ) if self.use_ada_layer_norm else self.norma(A )
)
lowerCamelCase__ : int = self.attna(
A , encoder_hidden_states=A , attention_mask=A , **A , )
lowerCamelCase__ : Any = attn_output + hidden_states
# 3. Feed-forward
lowerCamelCase__ : Union[str, Any] = self.norma(A )
if self.use_ada_layer_norm_zero:
lowerCamelCase__ : Optional[int] = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
F"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`." )
lowerCamelCase__ : Optional[int] = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
lowerCamelCase__ : Optional[int] = torch.cat(
[self.ff(A ) for hid_slice in norm_hidden_states.chunk(A , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
lowerCamelCase__ : Optional[int] = self.ff(A )
if self.use_ada_layer_norm_zero:
lowerCamelCase__ : Optional[Any] = gate_mlp.unsqueeze(1 ) * ff_output
lowerCamelCase__ : List[Any] = ff_output + hidden_states
return hidden_states
class __SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self : Any , A : int , A : Optional[int] = None , A : int = 4 , A : float = 0.0 , A : str = "geglu" , A : bool = False , ) ->int:
super().__init__()
lowerCamelCase__ : List[Any] = int(dim * mult )
lowerCamelCase__ : List[Any] = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
lowerCamelCase__ : int = GELU(A , A )
if activation_fn == "gelu-approximate":
lowerCamelCase__ : Optional[int] = GELU(A , A , approximate='''tanh''' )
elif activation_fn == "geglu":
lowerCamelCase__ : Any = GEGLU(A , A )
elif activation_fn == "geglu-approximate":
lowerCamelCase__ : int = ApproximateGELU(A , A )
lowerCamelCase__ : Union[str, Any] = nn.ModuleList([] )
# project in
self.net.append(A )
# project dropout
self.net.append(nn.Dropout(A ) )
# project out
self.net.append(nn.Linear(A , A ) )
        # FF as used in Vision Transformer, MLP-Mixer, etc. has a final dropout
if final_dropout:
self.net.append(nn.Dropout(A ) )
def __lowerCamelCase ( self : Dict , A : List[Any] ) ->Optional[Any]:
for module in self.net:
lowerCamelCase__ : int = module(A )
return hidden_states
class __SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self : Tuple , A : int , A : int , A : str = "none" ) ->Optional[Any]:
super().__init__()
lowerCamelCase__ : List[Any] = nn.Linear(A , A )
lowerCamelCase__ : Any = approximate
def __lowerCamelCase ( self : List[str] , A : Tuple ) ->str:
if gate.device.type != "mps":
return F.gelu(A , approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype )
def __lowerCamelCase ( self : List[str] , A : str ) ->Optional[int]:
lowerCamelCase__ : List[str] = self.proj(A )
lowerCamelCase__ : Optional[int] = self.gelu(A )
return hidden_states
class __SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self : Tuple , A : int , A : int ) ->Dict:
super().__init__()
lowerCamelCase__ : Optional[Any] = nn.Linear(A , dim_out * 2 )
def __lowerCamelCase ( self : List[Any] , A : List[Any] ) ->Tuple:
if gate.device.type != "mps":
return F.gelu(A )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )
def __lowerCamelCase ( self : Any , A : Union[str, Any] ) ->Any:
lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.proj(A ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(A )
class __SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self : Any , A : int , A : int ) ->str:
super().__init__()
lowerCamelCase__ : Optional[int] = nn.Linear(A , A )
def __lowerCamelCase ( self : Union[str, Any] , A : Dict ) ->Optional[Any]:
lowerCamelCase__ : List[str] = self.proj(A )
return x * torch.sigmoid(1.7_02 * x )
class __SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self : int , A : Dict , A : Optional[Any] ) ->str:
super().__init__()
lowerCamelCase__ : List[str] = nn.Embedding(A , A )
lowerCamelCase__ : str = nn.SiLU()
lowerCamelCase__ : int = nn.Linear(A , embedding_dim * 2 )
lowerCamelCase__ : Optional[Any] = nn.LayerNorm(A , elementwise_affine=A )
def __lowerCamelCase ( self : int , A : Union[str, Any] , A : Union[str, Any] ) ->Union[str, Any]:
lowerCamelCase__ : Union[str, Any] = self.linear(self.silu(self.emb(A ) ) )
lowerCamelCase__ , lowerCamelCase__ : List[str] = torch.chunk(A , 2 )
lowerCamelCase__ : Any = self.norm(A ) * (1 + scale) + shift
return x
class __SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self : str , A : Optional[Any] , A : int ) ->str:
super().__init__()
lowerCamelCase__ : Union[str, Any] = CombinedTimestepLabelEmbeddings(A , A )
lowerCamelCase__ : int = nn.SiLU()
lowerCamelCase__ : List[str] = nn.Linear(A , 6 * embedding_dim , bias=A )
lowerCamelCase__ : str = nn.LayerNorm(A , elementwise_affine=A , eps=1e-6 )
def __lowerCamelCase ( self : List[str] , A : Any , A : List[Any] , A : Tuple , A : Dict=None ) ->Union[str, Any]:
lowerCamelCase__ : List[Any] = self.linear(self.silu(self.emb(A , A , hidden_dtype=A ) ) )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : int = emb.chunk(6 , dim=1 )
lowerCamelCase__ : List[Any] = self.norm(A ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class __SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self : Any , A : int , A : int , A : int , A : Optional[str] = None , A : float = 1e-5 ) ->Any:
super().__init__()
lowerCamelCase__ : int = num_groups
lowerCamelCase__ : List[str] = eps
if act_fn is None:
lowerCamelCase__ : Tuple = None
else:
lowerCamelCase__ : Dict = get_activation(A )
lowerCamelCase__ : Any = nn.Linear(A , out_dim * 2 )
def __lowerCamelCase ( self : List[str] , A : Optional[int] , A : str ) ->Tuple:
if self.act:
lowerCamelCase__ : Union[str, Any] = self.act(A )
lowerCamelCase__ : Optional[Any] = self.linear(A )
lowerCamelCase__ : Optional[Any] = emb[:, :, None, None]
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = emb.chunk(2 , dim=1 )
lowerCamelCase__ : str = F.group_norm(A , self.num_groups , eps=self.eps )
lowerCamelCase__ : Dict = x * (1 + scale) + shift
return x
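# A standalone sketch of the chunked feed-forward trick used by
# BasicTransformerBlock above: split the hidden states along one dimension,
# run the FF on each chunk, and concatenate. Peak activation memory drops
# roughly by the number of chunks at the cost of a Python loop; the shapes
# below are illustrative.
import torch
import torch.nn as nn

ff = nn.Sequential(nn.Linear(64, 256), nn.GELU(), nn.Linear(256, 64))
hidden_states = torch.randn(2, 1024, 64)
chunk_size, chunk_dim = 256, 1
num_chunks = hidden_states.shape[chunk_dim] // chunk_size  # sequence length must divide evenly
ff_output = torch.cat(
    [ff(chunk) for chunk in hidden_states.chunk(num_chunks, dim=chunk_dim)], dim=chunk_dim
)
assert ff_output.shape == hidden_states.shape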
| 142
| 0
|
'''simple docstring'''
from __future__ import annotations
def _UpperCAmelCase ( _UpperCamelCase : list[list[int]] ) -> bool:
A_ = len(_UpperCamelCase )
# We need to create solution object to save path.
A_ = [[0 for _ in range(_UpperCamelCase )] for _ in range(_UpperCamelCase )]
A_ = run_maze(_UpperCamelCase, 0, 0, _UpperCamelCase )
if solved:
        print('''\n'''.join(str(row ) for row in solutions ) )
else:
print('''No solution exists!''' )
return solved
def _UpperCAmelCase ( _UpperCamelCase : list[list[int]], _UpperCamelCase : int, _UpperCamelCase : int, _UpperCamelCase : list[list[int]] ) -> bool:
A_ = len(_UpperCamelCase )
# Final check point.
if i == j == (size - 1):
A_ = 1
return True
A_ = (not i < 0) and (not j < 0) # Check lower bounds
A_ = (i < size) and (j < size) # Check upper bounds
if lower_flag and upper_flag:
# check for already visited and block points.
A_ = (not solutions[i][j]) and (not maze[i][j])
if block_flag:
# check visited
A_ = 1
# check for directions
if (
run_maze(_UpperCamelCase, i + 1, _UpperCamelCase, _UpperCamelCase )
or run_maze(_UpperCamelCase, _UpperCamelCase, j + 1, _UpperCamelCase )
or run_maze(_UpperCamelCase, i - 1, _UpperCamelCase, _UpperCamelCase )
or run_maze(_UpperCamelCase, _UpperCamelCase, j - 1, _UpperCamelCase )
):
return True
A_ = 0
return False
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
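# A tiny worked example of the backtracking idea above, written standalone so
# it does not depend on the obfuscated names: 0 marks a free cell, 1 a wall,
# and the solver marks the path from (0, 0) to (n - 1, n - 1).
def tiny_solve(maze, i=0, j=0, path=None):
    n = len(maze)
    path = path or [[0] * n for _ in range(n)]
    if not (0 <= i < n and 0 <= j < n) or maze[i][j] or path[i][j]:
        return None
    path[i][j] = 1
    if i == j == n - 1:
        return path
    for di, dj in ((1, 0), (0, 1), (-1, 0), (0, -1)):
        if tiny_solve(maze, i + di, j + dj, path):
            return path
    path[i][j] = 0  # backtrack so other branches can reuse this cell
    return None

print(tiny_solve([[0, 1], [0, 0]]))  # [[1, 0], [1, 1]]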
| 18
|
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class __UpperCAmelCase :
'''simple docstring'''
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=[0, 1, 2, 3] , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=[1, 384, 24, 24] , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , ) -> Tuple:
A_ = parent
A_ = batch_size
A_ = image_size
A_ = patch_size
A_ = num_channels
A_ = is_training
A_ = use_labels
A_ = hidden_size
A_ = num_hidden_layers
A_ = backbone_out_indices
A_ = num_attention_heads
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = initializer_range
A_ = num_labels
A_ = backbone_featmap_shape
A_ = scope
A_ = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
A_ = (image_size // patch_size) ** 2
A_ = num_patches + 1
def __A ( self ) -> Optional[Any]:
A_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
A_ = self.get_config()
return config, pixel_values, labels
def __A ( self ) -> Optional[Any]:
A_ = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [96, 192, 384, 768],
'''num_groups''': 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=_SCREAMING_SNAKE_CASE , backbone_featmap_shape=self.backbone_featmap_shape , )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
A_ = DPTModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A_ = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
A_ = self.num_labels
A_ = DPTForDepthEstimation(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A_ = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
A_ = self.num_labels
A_ = DPTForSemanticSegmentation(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A_ = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def __A ( self ) -> Optional[int]:
A_ = self.prepare_config_and_inputs()
A_ ,A_ ,A_ = config_and_inputs
A_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowercase : Optional[int] = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
__lowercase : Optional[int] = (
{
'depth-estimation': DPTForDepthEstimation,
'feature-extraction': DPTModel,
'image-segmentation': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__lowercase : Any = False
__lowercase : Tuple = False
__lowercase : List[Any] = False
def __A ( self ) -> Tuple:
A_ = DPTModelTester(self )
A_ = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def __A ( self ) -> Dict:
self.config_tester.run_common_tests()
@unittest.skip(reason='''DPT does not use inputs_embeds''' )
def __A ( self ) -> Union[str, Any]:
pass
def __A ( self ) -> Dict:
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) )
def __A ( self ) -> Optional[int]:
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(_SCREAMING_SNAKE_CASE )
A_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ = [*signature.parameters.keys()]
A_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
def __A ( self ) -> str:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def __A ( self ) -> str:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*_SCREAMING_SNAKE_CASE )
def __A ( self ) -> Optional[Any]:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_SCREAMING_SNAKE_CASE )
def __A ( self ) -> Any:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = True
if model_class in get_values(_SCREAMING_SNAKE_CASE ):
continue
A_ = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.train()
A_ = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
A_ = model(**_SCREAMING_SNAKE_CASE ).loss
loss.backward()
def __A ( self ) -> Any:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = False
A_ = True
if model_class in get_values(_SCREAMING_SNAKE_CASE ) or not model_class.supports_gradient_checkpointing:
continue
A_ = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.gradient_checkpointing_enable()
model.train()
A_ = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
A_ = model(**_SCREAMING_SNAKE_CASE ).loss
loss.backward()
def __A ( self ) -> Tuple:
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = _config_zero_init(_SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
A_ = model_class(config=_SCREAMING_SNAKE_CASE )
# Skip the check for the backbone
A_ = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
A_ = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __A ( self ) -> int:
pass
@slow
def __A ( self ) -> Dict:
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
A_ = DPTModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def __A ( self ) -> Optional[int]:
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = '''add'''
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
A_ = DPTForDepthEstimation(_SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( ) -> Optional[int]:
A_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
@slow
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __A ( self ) -> Any:
A_ = DPTImageProcessor.from_pretrained('''Intel/dpt-hybrid-midas''' )
A_ = DPTForDepthEstimation.from_pretrained('''Intel/dpt-hybrid-midas''' ).to(_SCREAMING_SNAKE_CASE )
A_ = prepare_img()
A_ = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(_SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
A_ = model(**_SCREAMING_SNAKE_CASE )
A_ = outputs.predicted_depth
# verify the predicted depth
A_ = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , _SCREAMING_SNAKE_CASE )
A_ = torch.tensor(
[[[5.6_437, 5.6_146, 5.6_511], [5.4_371, 5.5_649, 5.5_958], [5.5_215, 5.5_184, 5.5_293]]] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
| 18
| 1
|
"""simple docstring"""
def __UpperCAmelCase ( UpperCAmelCase_ : list , UpperCAmelCase_ : list , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int ) -> int:
'''simple docstring'''
if index == number_of_items:
return 0
__snake_case : List[Any] = 0
__snake_case : List[Any] = 0
__snake_case : Optional[int] = knapsack(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , index + 1 )
if weights[index] <= max_weight:
__snake_case : int = values[index] + knapsack(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , max_weight - weights[index] , index + 1 )
return max(UpperCAmelCase_ , UpperCAmelCase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
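# Worked example, written standalone since the names above are obfuscated (and
# assuming the positional order weights, values, number_of_items, max_weight,
# index suggested by the recursive calls): the classic instance below has
# optimum 220, taking the 20 kg and 30 kg items.
def tiny_knapsack(weights, values, max_weight, index=0):
    if index == len(weights):
        return 0
    skip = tiny_knapsack(weights, values, max_weight, index + 1)
    if weights[index] <= max_weight:
        take = values[index] + tiny_knapsack(weights, values, max_weight - weights[index], index + 1)
        return max(skip, take)
    return skip

assert tiny_knapsack([10, 20, 30], [60, 100, 120], 50) == 220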
| 172
|
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_a : Optional[int]= logging.get_logger(__name__)
_a : Dict= {"vocab_file": "spiece.model"}
_a : int= {
"vocab_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
}
}
_a : int= {
"albert-base-v1": 512,
"albert-large-v1": 512,
"albert-xlarge-v1": 512,
"albert-xxlarge-v1": 512,
"albert-base-v2": 512,
"albert-large-v2": 512,
"albert-xlarge-v2": 512,
"albert-xxlarge-v2": 512,
}
_a : Optional[Any]= "▁"
class UpperCamelCase ( lowercase ):
UpperCAmelCase : Optional[Any] = VOCAB_FILES_NAMES
UpperCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__(self : Optional[Any] , _A : List[str] , _A : int=True , _A : Optional[int]=True , _A : Any=False , _A : str="[CLS]" , _A : Dict="[SEP]" , _A : Any="<unk>" , _A : List[Any]="[SEP]" , _A : Any="<pad>" , _A : List[str]="[CLS]" , _A : int="[MASK]" , _A : Optional[Dict[str, Any]] = None , **_A : List[str] , ) -> None:
        # The mask token behaves like a normal word, i.e. it includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
__snake_case : Dict = (
AddedToken(_A , lstrip=_A , rstrip=_A , normalized=_A)
if isinstance(_A , _A)
else mask_token
)
__snake_case : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_A , remove_space=_A , keep_accents=_A , bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , sp_model_kwargs=self.sp_model_kwargs , **_A , )
__snake_case : Optional[int] = do_lower_case
__snake_case : Any = remove_space
__snake_case : Any = keep_accents
__snake_case : Dict = vocab_file
__snake_case : int = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(_A)
@property
def _lowercase (self : str) -> Optional[Any]:
return len(self.sp_model)
def _lowercase (self : Dict) -> List[str]:
__snake_case : int = {self.convert_ids_to_tokens(_A): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__(self : Optional[int]) -> int:
__snake_case : List[Any] = self.__dict__.copy()
__snake_case : Any = None
return state
def __setstate__(self : Union[str, Any] , _A : Optional[Any]) -> int:
__snake_case : str = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs'):
__snake_case : Optional[int] = {}
__snake_case : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def _lowercase (self : Tuple , _A : int) -> int:
if self.remove_space:
__snake_case : int = ' '.join(inputs.strip().split())
else:
__snake_case : str = inputs
__snake_case : Optional[int] = outputs.replace('``' , '"').replace('\'\'' , '"')
if not self.keep_accents:
__snake_case : Tuple = unicodedata.normalize('NFKD' , _A)
__snake_case : Optional[Any] = ''.join([c for c in outputs if not unicodedata.combining(_A)])
if self.do_lower_case:
__snake_case : Union[str, Any] = outputs.lower()
return outputs
def _lowercase (self : Any , _A : str) -> List[str]:
__snake_case : Union[str, Any] = self.preprocess_text(_A)
__snake_case : Optional[Any] = self.sp_model.encode(_A , out_type=_A)
__snake_case : Tuple = []
for piece in pieces:
if len(_A) > 1 and piece[-1] == str(',') and piece[-2].isdigit():
__snake_case : Tuple = self.sp_model.EncodeAsPieces(piece[:-1].replace(_A , ''))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
__snake_case : Any = cur_pieces[1:]
else:
__snake_case : Optional[int] = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(_A)
else:
new_pieces.append(_A)
return new_pieces
def _lowercase (self : Optional[Any] , _A : List[str]) -> int:
return self.sp_model.PieceToId(_A)
def _lowercase (self : Optional[int] , _A : Tuple) -> Union[str, Any]:
return self.sp_model.IdToPiece(_A)
def _lowercase (self : Union[str, Any] , _A : Union[str, Any]) -> Dict:
__snake_case : Any = []
__snake_case : Dict = ''
__snake_case : Optional[int] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_A) + token
__snake_case : List[str] = True
__snake_case : int = []
else:
current_sub_tokens.append(_A)
__snake_case : int = False
out_string += self.sp_model.decode(_A)
return out_string.strip()
def _lowercase (self : List[Any] , _A : List[int] , _A : Optional[List[int]] = None) -> List[int]:
__snake_case : Dict = [self.sep_token_id]
__snake_case : int = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _lowercase (self : Tuple , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A)
if token_ids_a is not None:
return [1] + ([0] * len(_A)) + [1] + ([0] * len(_A)) + [1]
return [1] + ([0] * len(_A)) + [1]
def _lowercase (self : str , _A : List[int] , _A : Optional[List[int]] = None) -> List[int]:
__snake_case : int = [self.sep_token_id]
__snake_case : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def _lowercase (self : Optional[Any] , _A : str , _A : Optional[str] = None) -> Tuple[str]:
if not os.path.isdir(_A):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
__snake_case : Optional[Any] = os.path.join(
_A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(_A) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , _A)
elif not os.path.isfile(self.vocab_file):
with open(_A , 'wb') as fi:
__snake_case : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(_A)
return (out_vocab_file,)
| 172
| 1
|
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ : List[str] = logging.get_logger(__name__)
def UpperCAmelCase ( a_ ) -> str:
"""simple docstring"""
A_ : Any = torch.load(a_ , map_location="""cpu""" )
if "model" in sd.keys():
A_ : int = torch.load(a_ , map_location="""cpu""" )["""model"""]
# pop unnecessary weights
A_ : Optional[int] = [
"""decoder.version""",
"""decoder.output_projection.weight""",
]
for key in keys_to_delete:
if key in sd:
sd.pop(a_ )
A_ : Tuple = {
"""decoder.project_in_dim.weight""": """decoder.project_in.weight""",
"""decoder.project_out_dim.weight""": """decoder.project_out.weight""",
"""decoder.layer_norm.weight""": """decoder.final_layer_norm.weight""",
"""decoder.layer_norm.bias""": """decoder.final_layer_norm.bias""",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
A_ : int = sd.pop(a_ )
A_ : str = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
A_ : Optional[Any] = sd[key]
            # We split the fused QKV weight into separate Q, K and V projections
A_ : List[str] = key.replace(""".qkv_proj.""" , """.q_proj.""" )
A_ : int = key.replace(""".qkv_proj.""" , """.k_proj.""" )
A_ : int = key.replace(""".qkv_proj.""" , """.v_proj.""" )
A_ : Any = value.shape[0]
assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` stores the QKV weight with the three blocks laid out as K, V, Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
A_ , A_ , A_ : Tuple = torch.split(a_ , depth // 3 , dim=0 )
A_ : str = q
A_ : Any = k
A_ : Union[str, Any] = v
del sd[key]
return sd
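
# The QKV handling above is the subtle step: metaseq stores one fused
# projection whose first dimension stacks the three matrices. A toy sketch of
# the same torch.split call (random weights, hidden size 4, not a real OPT
# checkpoint):
_toy_fused = torch.randn(3 * 4, 4)
_toy_q, _toy_k, _toy_v = torch.split(_toy_fused, _toy_fused.shape[0] // 3, dim=0)
assert _toy_q.shape == _toy_k.shape == _toy_v.shape == (4, 4)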
@torch.no_grad()
def UpperCAmelCase ( a_ , a_ , a_=None ) -> Any:
"""simple docstring"""
A_ : Union[str, Any] = load_checkpoint(a_ )
if config is not None:
A_ : Optional[int] = OPTConfig.from_pretrained(a_ )
else:
A_ : Dict = OPTConfig()
A_ : Optional[int] = OPTModel(a_ ).half().eval()
model.load_state_dict(a_ )
# Check results
Path(a_ ).mkdir(exist_ok=a_ )
model.save_pretrained(a_ )
if __name__ == "__main__":
UpperCamelCase__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
UpperCamelCase__ : List[Any] = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 164
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase__ : Any = {
'configuration_luke': ['LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LukeConfig'],
'tokenization_luke': ['LukeTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : int = [
'LUKE_PRETRAINED_MODEL_ARCHIVE_LIST',
'LukeForEntityClassification',
'LukeForEntityPairClassification',
'LukeForEntitySpanClassification',
'LukeForMultipleChoice',
'LukeForQuestionAnswering',
'LukeForSequenceClassification',
'LukeForTokenClassification',
'LukeForMaskedLM',
'LukeModel',
'LukePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
UpperCamelCase__ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
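
# The _LazyModule above defers importing the heavy modeling code until an
# attribute is first touched. A minimal standalone sketch of the same idea,
# meant to live in a module of its own, uses PEP 562 module-level __getattr__
# (illustrative only, not the transformers implementation):
import importlib

_LAZY_ATTRS = {'sqrt': 'math'}  # attribute name -> providing module

def __getattr__(name):
    if name in _LAZY_ATTRS:
        return getattr(importlib.import_module(_LAZY_ATTRS[name]), name)
    raise AttributeError(name)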
| 164
| 1
|
'''simple docstring'''
def __lowerCamelCase ( _lowercase = 1_0_0_0 ) -> int:
return sum(e for e in range(3 , _lowercase ) if e % 3 == 0 or e % 5 == 0 )
if __name__ == "__main__":
print(F'''{solution() = }''')
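
# The same sum has a constant-time closed form by inclusion-exclusion:
# multiples of 3, plus multiples of 5, minus multiples of 15 (counted twice).
def _arith_sum(k: int, limit: int) -> int:
    # Sum of k, 2k, 3k, ... strictly below `limit`.
    n = (limit - 1) // k
    return k * n * (n + 1) // 2

print(_arith_sum(3, 1_0_0_0) + _arith_sum(5, 1_0_0_0) - _arith_sum(15, 1_0_0_0))  # 233168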
| 265
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
a : Dict = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
a : List[str] = 2_5_0_0_0_4
a : List[str] = 2_5_0_0_2_0
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ):
lowercase = MBartTokenizer
lowercase = MBartTokenizerFast
lowercase = True
lowercase = True
def _lowercase( self ) -> List[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase : str = MBartTokenizer(A , keep_accents=A )
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase( self ) -> int:
UpperCAmelCase : Optional[Any] = MBartTokenizer(A , keep_accents=A )
UpperCAmelCase : Tuple = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(A , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(A ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase : Dict = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(A )
self.assertListEqual(
A , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(A )
self.assertListEqual(
A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def _lowercase( self ) -> Union[str, Any]:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
UpperCAmelCase : Tuple = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(A , **A )
UpperCAmelCase : str = self.tokenizer_class.from_pretrained(A , **A )
UpperCAmelCase : Optional[int] = tempfile.mkdtemp()
UpperCAmelCase : Dict = tokenizer_r.save_pretrained(A )
UpperCAmelCase : int = tokenizer_p.save_pretrained(A )
                # Checks it saves with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
UpperCAmelCase : int = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(A , A )
# Checks everything loads correctly in the same way
UpperCAmelCase : Optional[int] = tokenizer_r.from_pretrained(A )
UpperCAmelCase : Tuple = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(A )
# Save tokenizer rust, legacy_format=True
UpperCAmelCase : Optional[int] = tempfile.mkdtemp()
UpperCAmelCase : Any = tokenizer_r.save_pretrained(A , legacy_format=A )
UpperCAmelCase : Optional[int] = tokenizer_p.save_pretrained(A )
                # Checks it saves with the same files
self.assertSequenceEqual(A , A )
# Checks everything loads correctly in the same way
UpperCAmelCase : List[str] = tokenizer_r.from_pretrained(A )
UpperCAmelCase : Any = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
shutil.rmtree(A )
# Save tokenizer rust, legacy_format=False
UpperCAmelCase : Optional[Any] = tempfile.mkdtemp()
UpperCAmelCase : Optional[Any] = tokenizer_r.save_pretrained(A , legacy_format=A )
UpperCAmelCase : List[str] = tokenizer_p.save_pretrained(A )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
UpperCAmelCase : Optional[Any] = tokenizer_r.from_pretrained(A )
UpperCAmelCase : str = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
shutil.rmtree(A )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( unittest.TestCase ):
lowercase = 'facebook/mbart-large-en-ro'
lowercase = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
lowercase = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
lowercase = [8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2, EN_CODE]
@classmethod
def _lowercase( cls ) -> Tuple:
UpperCAmelCase : MBartTokenizer = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" )
UpperCAmelCase : int = 1
return cls
def _lowercase( self ) -> Union[str, Any]:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 250001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 250004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 250020 )
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Optional[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , A )
def _lowercase( self ) -> List[str]:
self.assertIn(A , self.tokenizer.all_special_ids )
UpperCAmelCase : str = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
UpperCAmelCase : int = self.tokenizer.decode(A , skip_special_tokens=A )
UpperCAmelCase : Dict = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=A )
self.assertEqual(A , A )
self.assertNotIn(self.tokenizer.eos_token , A )
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : List[str] = ["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0] , A )
UpperCAmelCase : int = 10
UpperCAmelCase : List[Any] = self.tokenizer(A , max_length=A , truncation=A ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , A )
self.assertEqual(len(A ) , A )
def _lowercase( self ) -> Tuple:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [250026, 250001] )
def _lowercase( self ) -> Dict:
UpperCAmelCase : Any = tempfile.mkdtemp()
UpperCAmelCase : Dict = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(A )
UpperCAmelCase : Tuple = MBartTokenizer.from_pretrained(A )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , A )
@require_torch
def _lowercase( self ) -> List[str]:
UpperCAmelCase : int = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=A , return_tensors="""pt""" )
UpperCAmelCase : Union[str, Any] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Union[str, Any] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=A , truncation=A , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
UpperCAmelCase : Optional[int] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(A , A )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
UpperCAmelCase : str = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , A )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def _lowercase( self ) -> List[str]:
UpperCAmelCase : Optional[Any] = self.tokenizer(self.src_text , padding=A , truncation=A , max_length=3 , return_tensors="""pt""" )
UpperCAmelCase : Dict = self.tokenizer(
text_target=self.tgt_text , padding=A , truncation=A , max_length=10 , return_tensors="""pt""" )
UpperCAmelCase : Dict = targets["""input_ids"""]
UpperCAmelCase : Union[str, Any] = shift_tokens_right(A , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : List[Any] = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(A ) , {
# A, test, EOS, en_XX
"""input_ids""": [[62, 3034, 2, 250004]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 250001,
} , )
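
# shift_tokens_right turns labels into decoder inputs by rotating the
# sequence one position; MBart's variant moves the trailing language code to
# the front. A sketch of the generic seq2seq shift (not MBart's exact rule):
def _shift_right_sketch(input_ids, decoder_start_token_id, pad_token_id):
    shifted = input_ids.new_zeros(input_ids.shape)
    shifted[:, 1:] = input_ids[:, :-1].clone()
    shifted[:, 0] = decoder_start_token_id
    shifted.masked_fill_(shifted == -100, pad_token_id)  # -100 marks ignored label positions
    return shifted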
| 265
| 1
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _UpperCAmelCase ( lowerCAmelCase, lowerCAmelCase, unittest.TestCase ):
'''simple docstring'''
__A = StableDiffusionPanoramaPipeline
__A = TEXT_TO_IMAGE_PARAMS
__A = TEXT_TO_IMAGE_BATCH_PARAMS
__A = TEXT_TO_IMAGE_IMAGE_PARAMS
__A = TEXT_TO_IMAGE_IMAGE_PARAMS
def __UpperCAmelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0)
_UpperCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
_UpperCamelCase = DDIMScheduler()
torch.manual_seed(0)
_UpperCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0)
_UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
_UpperCamelCase = CLIPTextModel(lowercase_)
_UpperCamelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
_UpperCamelCase = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def __UpperCAmelCase ( self : int , lowercase_ : Optional[int] , lowercase_ : List[Any]=0) -> List[str]:
"""simple docstring"""
_UpperCamelCase = torch.manual_seed(lowercase_)
_UpperCamelCase = {
"prompt": "a photo of the dolomites",
"generator": generator,
# Setting height and width to None to prevent OOMs on CPU.
"height": None,
"width": None,
"num_inference_steps": 1,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def __UpperCAmelCase ( self : Tuple) -> Any:
"""simple docstring"""
_UpperCamelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = StableDiffusionPanoramaPipeline(**lowercase_)
_UpperCamelCase = sd_pipe.to(lowercase_)
sd_pipe.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = self.get_dummy_inputs(lowercase_)
_UpperCamelCase = sd_pipe(**lowercase_).images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCamelCase = np.array([0.61_86, 0.53_74, 0.49_15, 0.41_35, 0.41_14, 0.45_63, 0.51_28, 0.49_77, 0.47_57])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCAmelCase ( self : List[Any]) -> Optional[int]:
"""simple docstring"""
super().test_inference_batch_consistent(batch_sizes=[1, 2])
def __UpperCAmelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.2_5e-3)
def __UpperCAmelCase ( self : Optional[int]) -> int:
"""simple docstring"""
_UpperCamelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = StableDiffusionPanoramaPipeline(**lowercase_)
_UpperCamelCase = sd_pipe.to(lowercase_)
sd_pipe.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = self.get_dummy_inputs(lowercase_)
_UpperCamelCase = "french fries"
_UpperCamelCase = sd_pipe(**lowercase_ , negative_prompt=lowercase_)
_UpperCamelCase = output.images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCamelCase = np.array([0.61_87, 0.53_75, 0.49_15, 0.41_36, 0.41_14, 0.45_63, 0.51_28, 0.49_76, 0.47_57])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCAmelCase ( self : Dict) -> Any:
"""simple docstring"""
_UpperCamelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = StableDiffusionPanoramaPipeline(**lowercase_)
_UpperCamelCase = sd_pipe.to(lowercase_)
sd_pipe.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = self.get_dummy_inputs(lowercase_)
_UpperCamelCase = sd_pipe(**lowercase_ , view_batch_size=2)
_UpperCamelCase = output.images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCamelCase = np.array([0.61_87, 0.53_75, 0.49_15, 0.41_36, 0.41_14, 0.45_63, 0.51_28, 0.49_76, 0.47_57])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCAmelCase ( self : Tuple) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = EulerAncestralDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear")
_UpperCamelCase = StableDiffusionPanoramaPipeline(**lowercase_)
_UpperCamelCase = sd_pipe.to(lowercase_)
sd_pipe.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = self.get_dummy_inputs(lowercase_)
_UpperCamelCase = sd_pipe(**lowercase_).images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCamelCase = np.array([0.40_24, 0.65_10, 0.49_01, 0.53_78, 0.58_13, 0.56_22, 0.47_95, 0.44_67, 0.49_52])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCAmelCase ( self : int) -> Tuple:
"""simple docstring"""
_UpperCamelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = PNDMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , skip_prk_steps=lowercase_)
_UpperCamelCase = StableDiffusionPanoramaPipeline(**lowercase_)
_UpperCamelCase = sd_pipe.to(lowercase_)
sd_pipe.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = self.get_dummy_inputs(lowercase_)
_UpperCamelCase = sd_pipe(**lowercase_).images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCamelCase = np.array([0.63_91, 0.62_91, 0.48_61, 0.51_34, 0.55_52, 0.45_78, 0.50_32, 0.50_23, 0.45_39])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : Optional[int]) -> List[str]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self : List[Any] , lowercase_ : Any=0) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = torch.manual_seed(lowercase_)
_UpperCamelCase = {
"prompt": "a photo of the dolomites",
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def __UpperCAmelCase ( self : int) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = "stabilityai/stable-diffusion-2-base"
_UpperCamelCase = DDIMScheduler.from_pretrained(lowercase_ , subfolder="scheduler")
_UpperCamelCase = StableDiffusionPanoramaPipeline.from_pretrained(lowercase_ , scheduler=lowercase_ , safety_checker=lowercase_)
pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
pipe.enable_attention_slicing()
_UpperCamelCase = self.get_inputs()
_UpperCamelCase = pipe(**lowercase_).images
_UpperCamelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
_UpperCamelCase = np.array(
[
0.36_96_83_92,
0.27_02_53_72,
0.32_44_67_66,
0.28_37_93_87,
0.36_36_32_74,
0.30_73_33_47,
0.27_10_00_27,
0.27_05_41_25,
0.25_53_60_96,
])
assert np.abs(expected_slice - image_slice).max() < 1e-2
def __UpperCAmelCase ( self : Tuple) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = StableDiffusionPanoramaPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-base" , safety_checker=lowercase_)
_UpperCamelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
pipe.enable_attention_slicing()
_UpperCamelCase = self.get_inputs()
_UpperCamelCase = pipe(**lowercase_).images
_UpperCamelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
_UpperCamelCase = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
])
assert np.abs(expected_slice - image_slice).max() < 1e-3
def __UpperCAmelCase ( self : Any) -> Any:
"""simple docstring"""
_UpperCamelCase = 0
def callback_fn(lowercase_ : int , lowercase_ : int , lowercase_ : torch.FloatTensor) -> None:
_UpperCamelCase = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
_UpperCamelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
_UpperCamelCase = latents[0, -3:, -3:, -1]
_UpperCamelCase = np.array(
[
0.18_68_18_69,
0.33_90_78_16,
0.5_36_12_76,
0.14_43_28_65,
-0.02_85_66_11,
-0.73_94_11_23,
0.23_39_79_87,
0.47_32_26_82,
-0.37_82_31_64,
])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
elif step == 2:
_UpperCamelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
_UpperCamelCase = latents[0, -3:, -3:, -1]
_UpperCamelCase = np.array(
[
0.18_53_96_45,
0.33_98_72_48,
0.5_37_85_59,
0.14_43_71_42,
-0.02_45_52_61,
-0.7_33_83_17,
0.23_99_07_55,
0.47_35_62_72,
-0.3_78_65_05,
])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
_UpperCamelCase = False
_UpperCamelCase = "stabilityai/stable-diffusion-2-base"
_UpperCamelCase = DDIMScheduler.from_pretrained(lowercase_ , subfolder="scheduler")
_UpperCamelCase = StableDiffusionPanoramaPipeline.from_pretrained(lowercase_ , scheduler=lowercase_ , safety_checker=lowercase_)
_UpperCamelCase = pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
pipe.enable_attention_slicing()
_UpperCamelCase = self.get_inputs()
pipe(**lowercase_ , callback=lowercase_ , callback_steps=1)
assert callback_fn.has_been_called
assert number_of_steps == 3
def __UpperCAmelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_UpperCamelCase = "stabilityai/stable-diffusion-2-base"
_UpperCamelCase = DDIMScheduler.from_pretrained(lowercase_ , subfolder="scheduler")
_UpperCamelCase = StableDiffusionPanoramaPipeline.from_pretrained(lowercase_ , scheduler=lowercase_ , safety_checker=lowercase_)
_UpperCamelCase = pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
_UpperCamelCase = self.get_inputs()
_UpperCamelCase = pipe(**lowercase_)
_UpperCamelCase = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
assert mem_bytes < 5.5 * 10**9
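
# The callback test above relies on diffusers' callback/callback_steps hooks,
# which invoke a user function with (step, timestep, latents) during
# denoising. A minimal usage sketch (pipeline construction elided):
def _log_latents(step, timestep, latents):
    print(step, tuple(latents.shape))

# pipe(prompt, callback=_log_latents, callback_steps=1)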
| 350
|
import math
class _UpperCAmelCase :
'''simple docstring'''
    def __UpperCAmelCase ( self : Dict , lowercase_ : list[list[float]] , lowercase_ : list[int]) -> int:
        """simple docstring"""
        # Squared Euclidean distance of the sample to each of the two weight vectors.
        _UpperCamelCase = 0.0
        _UpperCamelCase = 0.0
        for i in range(len(lowercase_)):
            da += math.pow((sample[i] - weights[0][i]) , 2)
            db += math.pow((sample[i] - weights[1][i]) , 2)
        return 0 if da > db else 1
def __UpperCAmelCase ( self : Any , lowercase_ : list[list[int | float]] , lowercase_ : list[int] , lowercase_ : int , lowercase_ : float) -> list[list[int | float]]:
"""simple docstring"""
for i in range(len(lowercase_)):
weights[j][i] += alpha * (sample[i] - weights[j][i])
return weights
def lowerCAmelCase__ ( ) ->None:
'''simple docstring'''
_UpperCamelCase = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
# weight initialization ( n, C )
_UpperCamelCase = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
# training
_UpperCamelCase = SelfOrganizingMap()
_UpperCamelCase = 3
_UpperCamelCase = 0.5
for _ in range(a__ ):
for j in range(len(a__ ) ):
# training sample
_UpperCamelCase = training_samples[j]
# Compute the winning vector
_UpperCamelCase = self_organizing_map.get_winner(a__ , a__ )
# Update the winning vector
_UpperCamelCase = self_organizing_map.update(a__ , a__ , a__ , a__ )
# classify test sample
_UpperCamelCase = [0, 0, 0, 1]
_UpperCamelCase = self_organizing_map.get_winner(a__ , a__ )
# results
print(f'Clusters that the test sample belongs to : {winner}' )
print(f'Weights that have been trained : {weights}' )
# running the main() function
if __name__ == "__main__":
main()
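
# The update rule is plain competitive learning: the winning weight vector
# moves a fraction alpha toward the sample, w <- w + alpha * (x - w).
# A one-step worked example with toy numbers:
_w, _x, _alpha = [0.2, 0.6], [1.0, 0.0], 0.5
print([wi + _alpha * (xi - wi) for wi, xi in zip(_w, _x)])  # [0.6, 0.3]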
| 63
| 0
|
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def lowerCamelCase__ ( _a , _a , _a):
SCREAMING_SNAKE_CASE : Optional[Any] = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, oder?",
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
SCREAMING_SNAKE_CASE : Any = {
"ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
"en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
"en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
"de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
}
SCREAMING_SNAKE_CASE : Tuple = f"{src_lang}-{tgt_lang}"
SCREAMING_SNAKE_CASE : Union[str, Any] = f"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
os.makedirs(_a , exist_ok=_a)
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(_a , "README.md")
print(f"Generating {path}")
with open(_a , "w" , encoding="utf-8") as f:
f.write(_a)
# make sure we are under the root of the project
a_ = Path(__file__).resolve().parent.parent.parent
a_ = repo_dir / 'model_cards'
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
a_ , a_ , a_ = model_name.split('-')
a_ = model_cards_dir / 'facebook' / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 76
|
import baseaa
def lowerCamelCase__ ( _a):
return baseaa.aaaencode(string.encode("utf-8"))
def lowerCamelCase__ ( _a):
return baseaa.aaadecode(_a).decode("utf-8")
if __name__ == "__main__":
import doctest
doctest.testmod()
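
# The helpers above appear to wrap Ascii85 encoding (assuming baseaa stands
# for the standard-library base64 module). A standalone round-trip sketch
# against base64 directly:
import base64

assert base64.a85decode(base64.a85encode(b'hello')) == b'hello'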
| 76
| 1
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
class _a :
def __init__( self ,_SCREAMING_SNAKE_CASE ) -> Dict:
_snake_case = value
_snake_case = None
_snake_case = None
class _a :
def __init__( self ,_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
_snake_case = tree
def _lowercase ( self ,_SCREAMING_SNAKE_CASE ) -> Dict:
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left ) + self.depth_first_search(node.right )
)
def __iter__( self ) -> Any:
yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod()
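
# A standalone usage sketch of the same depth-first sum, with fresh class
# names since both classes above carry the same masked name:
class _NodeSketch:
    def __init__(self, value):
        self.value = value
        self.left = None
        self.right = None

def _dfs_sum(node):
    return 0 if node is None else node.value + _dfs_sum(node.left) + _dfs_sum(node.right)

_root = _NodeSketch(2)
_root.left, _root.right = _NodeSketch(5), _NodeSketch(9)
print(_dfs_sum(_root))  # 2 + 5 + 9 = 16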
| 352
|
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def __a ( _UpperCamelCase: Tuple ) -> Union[str, Any]:
"""simple docstring"""
_snake_case = os.path.join(args.tf_model_dir , "parameters.json" )
_snake_case = json.loads(open(_UpperCamelCase ).read() )
if not params:
raise ValueError(
F"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""" )
if not args.output.endswith(".pt" ):
_snake_case = args.output + ".pt"
_snake_case = OrderedDict()
with tf.device("/CPU:0" ):
_snake_case = tf.train.load_checkpoint(args.tf_model_dir )
_snake_case = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
_snake_case = reader.get_tensor(_UpperCamelCase ).astype(np.floataa )
if key_name.endswith("/adam_m" ) or key_name.endswith("/adam_v" ):
continue
if key_name.startswith("pasts/" ):
if key_name.startswith("pasts/mlp" ):
_snake_case = int(key_name[9] )
elif key_name.startswith("pasts/out" ):
_snake_case = 8
_snake_case = "model.sqout.%d.weight" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
_snake_case = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_snake_case = torch.tensor(_UpperCamelCase )
elif key_name.startswith("model/moe" ):
_snake_case = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/switch_gating/kernel" ):
_snake_case = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
_snake_case = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_snake_case = torch.tensor(_UpperCamelCase )
elif key_name.endswith("/softmlp/kernel" ):
_snake_case = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
_snake_case = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_snake_case = torch.tensor(_UpperCamelCase )
elif key_name.endswith("/wo/kernel" ) or key_name.endswith("/wi/kernel" ):
_snake_case = key_name[-9:-7]
for i in range(16 ):
_snake_case = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
_snake_case = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
_snake_case = torch.tensor(_UpperCamelCase )
elif key_name.startswith("model/mlp" ):
_snake_case = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/p1/kernel" ):
_snake_case = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
_snake_case = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_snake_case = torch.tensor(_UpperCamelCase )
elif key_name.endswith("/p1/bias" ):
_snake_case = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
_snake_case = vnp.copy() # same because it is one dimensional
_snake_case = torch.tensor(_UpperCamelCase )
elif key_name.endswith("/p2/kernel" ):
_snake_case = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
_snake_case = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_snake_case = torch.tensor(_UpperCamelCase )
elif key_name.endswith("/p2/bias" ):
_snake_case = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
_snake_case = vnp.copy() # same because it is one dimensional
_snake_case = torch.tensor(_UpperCamelCase )
elif key_name.startswith("model/ln" ):
_snake_case = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
_snake_case = "model.blocks.%d.feed_forward.norm.bias" % player
_snake_case = vnp.copy() # same because it is one dimensional
_snake_case = torch.tensor(_UpperCamelCase )
elif key_name.endswith("/g" ):
_snake_case = "model.blocks.%d.feed_forward.norm.weight" % player
_snake_case = vnp.copy() # same because it is one dimensional
_snake_case = torch.tensor(_UpperCamelCase )
elif key_name.startswith("model/att" ):
_snake_case = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/qkv/kernel" ):
_snake_case = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
_snake_case = state[:, 0, :, :]
_snake_case = state[:, 1, :, :]
_snake_case = state[:, 2, :, :]
_snake_case = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_snake_case = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_snake_case = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_snake_case = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
_snake_case = torch.tensor(_UpperCamelCase )
_snake_case = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
_snake_case = torch.tensor(_UpperCamelCase )
_snake_case = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
_snake_case = torch.tensor(_UpperCamelCase )
elif key_name.endswith("/o/kernel" ):
_snake_case = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
_snake_case = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
_snake_case = torch.tensor(_UpperCamelCase )
elif key_name.startswith("model/an" ):
_snake_case = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
_snake_case = "model.blocks.%d.self_attn.norm.bias" % player
_snake_case = vnp.copy() # same because it is one dimensional
_snake_case = torch.tensor(_UpperCamelCase )
elif key_name.endswith("/g" ):
_snake_case = "model.blocks.%d.self_attn.norm.weight" % player
_snake_case = vnp.copy() # same because it is one dimensional
_snake_case = torch.tensor(_UpperCamelCase )
elif (
key_name.startswith("model/wte" )
or key_name.startswith("model/wpe" )
or key_name.startswith("model/ete" )
):
_snake_case = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
key_name[-3:]
]
_snake_case = "model.%s.weight" % nlayer
_snake_case = vnp.copy() # same in embedded
_snake_case = torch.tensor(_UpperCamelCase )
if key_name.startswith("model/wte" ):
_snake_case = "lm_head.weight"
_snake_case = vnp.copy() # same in embedded
_snake_case = torch.tensor(_UpperCamelCase )
elif key_name.startswith("model/wob" ):
_snake_case = "final_logits_bias"
_snake_case = vnp.copy() # same in embedded
_snake_case = state.reshape((1, -1) )
_snake_case = torch.tensor(_UpperCamelCase )
elif key_name == "model/dense/kernel":
_snake_case = "model.last_project.weight"
_snake_case = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_snake_case = torch.tensor(_UpperCamelCase )
elif key_name == "model/dense_1/bias":
_snake_case = "model.last_project.bias"
_snake_case = vnp.copy() # same because it is one dimensional
_snake_case = torch.tensor(_UpperCamelCase )
torch.save(_UpperCamelCase , args.output )
if __name__ == "__main__":
UpperCamelCase_ : Tuple = argparse.ArgumentParser(
description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''')
parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''')
UpperCamelCase_ : Any = parser.parse_args()
convert_tf_gptsan_to_pt(args)
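
# The converter's core loop walks every variable in the TF checkpoint via
# tf.train.load_checkpoint. A minimal reading sketch (the path below is a
# placeholder, not a real checkpoint):
#
# reader = tf.train.load_checkpoint('/path/to/tf_ckpt')
# for name, shape in reader.get_variable_to_shape_map().items():
#     print(name, shape, reader.get_tensor(name).dtype)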
| 142
| 0
|
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class a__ ( A__ ):
A = 42
A = jnp.floataa
A = True
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
super().setup()
SCREAMING_SNAKE_CASE_ : Any = nn.Dense(5,dtype=self.dtype )
def __call__( self : Any,*_A : str,**_A : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = super().__call__(*_A,**_A )
SCREAMING_SNAKE_CASE_ : List[str] = self.cls(outputs[2] )
return outputs[:2] + (cls_out,)
class a__ ( A__ ):
A = FlaxBigBirdForNaturalQuestionsModule
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Tuple , lowerCAmelCase : int , lowerCAmelCase : Optional[int] ):
"""simple docstring"""
def cross_entropy(lowerCAmelCase : Dict , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int]=None ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = logits.shape[-1]
SCREAMING_SNAKE_CASE_ : Optional[Any] = (labels[..., None] == jnp.arange(lowerCAmelCase )[None]).astype("f4" )
SCREAMING_SNAKE_CASE_ : str = jax.nn.log_softmax(lowerCAmelCase , axis=-1 )
SCREAMING_SNAKE_CASE_ : Optional[int] = -jnp.sum(labels * logits , axis=-1 )
if reduction is not None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = reduction(lowerCAmelCase )
return loss
SCREAMING_SNAKE_CASE_ : str = partial(lowerCAmelCase , reduction=jnp.mean )
SCREAMING_SNAKE_CASE_ : str = cross_entropy(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : str = cross_entropy(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Any = cross_entropy(lowerCAmelCase , lowerCAmelCase )
return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class a__ :
A = "google/bigbird-roberta-base"
A = 3000
A = 10500
A = 128
A = 3
A = 1
A = 5
# tx_args
A = 3E-5
A = 0.0
A = 20000
A = 0.0095
A = "bigbird-roberta-natural-questions"
A = "training-expt"
A = "data/nq-training.jsonl"
A = "data/nq-validation.jsonl"
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
os.makedirs(self.base_dir,exist_ok=_A )
SCREAMING_SNAKE_CASE_ : int = os.path.join(self.base_dir,self.save_dir )
SCREAMING_SNAKE_CASE_ : List[Any] = self.batch_size_per_device * jax.device_count()
@dataclass
class a__ :
A = 42
A = 4096 # no dynamic padding on TPUs
def __call__( self : str,_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.collate_fn(_A )
SCREAMING_SNAKE_CASE_ : str = jax.tree_util.tree_map(_A,_A )
return batch
def __UpperCamelCase ( self : List[str],_A : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = self.fetch_inputs(features["input_ids"] )
SCREAMING_SNAKE_CASE_ : Any = {
"input_ids": jnp.array(_A,dtype=jnp.intaa ),
"attention_mask": jnp.array(_A,dtype=jnp.intaa ),
"start_labels": jnp.array(features["start_token"],dtype=jnp.intaa ),
"end_labels": jnp.array(features["end_token"],dtype=jnp.intaa ),
"pooled_labels": jnp.array(features["category"],dtype=jnp.intaa ),
}
return batch
def __UpperCamelCase ( self : Tuple,_A : list ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = [self._fetch_inputs(_A ) for ids in input_ids]
return zip(*_A )
def __UpperCamelCase ( self : Optional[int],_A : list ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = [1 for _ in range(len(_A ) )]
while len(_A ) < self.max_length:
input_ids.append(self.pad_id )
attention_mask.append(0 )
return input_ids, attention_mask
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : str , lowerCAmelCase : Any=None ):
"""simple docstring"""
if seed is not None:
SCREAMING_SNAKE_CASE_ : Optional[int] = dataset.shuffle(seed=lowerCAmelCase )
for i in range(len(lowerCAmelCase ) // batch_size ):
SCREAMING_SNAKE_CASE_ : List[str] = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(lowerCAmelCase )
@partial(jax.pmap , axis_name="batch" )
def _snake_case ( lowerCAmelCase : str , lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Optional[int] ):
"""simple docstring"""
def loss_fn(lowerCAmelCase : List[Any] ):
SCREAMING_SNAKE_CASE_ : Optional[int] = model_inputs.pop("start_labels" )
SCREAMING_SNAKE_CASE_ : Dict = model_inputs.pop("end_labels" )
SCREAMING_SNAKE_CASE_ : List[str] = model_inputs.pop("pooled_labels" )
SCREAMING_SNAKE_CASE_ : Optional[Any] = state.apply_fn(**lowerCAmelCase , params=lowerCAmelCase , dropout_rng=lowerCAmelCase , train=lowerCAmelCase )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = outputs
return state.loss_fn(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = jax.random.split(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Dict = jax.value_and_grad(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = grad_fn(state.params )
SCREAMING_SNAKE_CASE_ : Optional[int] = jax.lax.pmean({"loss": loss} , axis_name="batch" )
SCREAMING_SNAKE_CASE_ : Tuple = jax.lax.pmean(lowerCAmelCase , "batch" )
SCREAMING_SNAKE_CASE_ : List[Any] = state.apply_gradients(grads=lowerCAmelCase )
return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name="batch" )
def _snake_case ( lowerCAmelCase : Dict , **lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = model_inputs.pop("start_labels" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model_inputs.pop("end_labels" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model_inputs.pop("pooled_labels" )
SCREAMING_SNAKE_CASE_ : str = state.apply_fn(**lowerCAmelCase , params=state.params , train=lowerCAmelCase )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = outputs
SCREAMING_SNAKE_CASE_ : Tuple = state.loss_fn(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] = jax.lax.pmean({"loss": loss} , axis_name="batch" )
return metrics
class a__ ( train_state.TrainState ):
A = struct.field(pytree_node=A__ )
@dataclass
class a__ :
A = 42
A = 42
A = 42
A = 42
A = 42
A = 42
A = None
def __UpperCamelCase ( self : Dict,_A : Union[str, Any],_A : int,_A : Union[str, Any],_A : Dict=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = model.params
SCREAMING_SNAKE_CASE_ : int = TrainState.create(
apply_fn=model.__call__,params=_A,tx=_A,loss_fn=_A,)
if ckpt_dir is not None:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = restore_checkpoint(_A,_A )
SCREAMING_SNAKE_CASE_ : Any = {
"lr": args.lr,
"init_lr": args.init_lr,
"warmup_steps": args.warmup_steps,
"num_train_steps": num_train_steps,
"weight_decay": args.weight_decay,
}
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = build_tx(**_A )
SCREAMING_SNAKE_CASE_ : List[str] = train_state.TrainState(
step=_A,apply_fn=model.__call__,params=_A,tx=_A,opt_state=_A,)
SCREAMING_SNAKE_CASE_ : Optional[int] = args
SCREAMING_SNAKE_CASE_ : Optional[int] = data_collator
SCREAMING_SNAKE_CASE_ : int = lr
SCREAMING_SNAKE_CASE_ : Dict = params
SCREAMING_SNAKE_CASE_ : List[str] = jax_utils.replicate(_A )
return state
def __UpperCamelCase ( self : Union[str, Any],_A : Optional[int],_A : List[Any],_A : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.args
SCREAMING_SNAKE_CASE_ : List[Any] = len(_A ) // args.batch_size
SCREAMING_SNAKE_CASE_ : Optional[int] = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE_ : str = jax.random.split(_A,jax.device_count() )
for epoch in range(args.max_epochs ):
SCREAMING_SNAKE_CASE_ : Any = jnp.array(0,dtype=jnp.floataa )
SCREAMING_SNAKE_CASE_ : Any = get_batched_dataset(_A,args.batch_size,seed=_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0
for batch in tqdm(_A,total=_A,desc=F'Running EPOCH-{epoch}' ):
SCREAMING_SNAKE_CASE_ : Optional[int] = self.data_collator(_A )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = self.train_step_fn(_A,_A,**_A )
running_loss += jax_utils.unreplicate(metrics["loss"] )
i += 1
if i % args.logging_steps == 0:
SCREAMING_SNAKE_CASE_ : int = jax_utils.unreplicate(state.step )
SCREAMING_SNAKE_CASE_ : int = running_loss.item() / i
SCREAMING_SNAKE_CASE_ : Dict = self.scheduler_fn(state_step - 1 )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.evaluate(_A,_A )
SCREAMING_SNAKE_CASE_ : int = {
"step": state_step.item(),
"eval_loss": eval_loss.item(),
"tr_loss": tr_loss,
"lr": lr.item(),
}
tqdm.write(str(_A ) )
self.logger.log(_A,commit=_A )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + F'-e{epoch}-s{i}',state=_A )
def __UpperCamelCase ( self : List[Any],_A : Dict,_A : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = get_batched_dataset(_A,self.args.batch_size )
SCREAMING_SNAKE_CASE_ : Optional[int] = len(_A ) // self.args.batch_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = jnp.array(0,dtype=jnp.floataa )
SCREAMING_SNAKE_CASE_ : Any = 0
for batch in tqdm(_A,total=_A,desc="Evaluating ... " ):
SCREAMING_SNAKE_CASE_ : int = self.data_collator(_A )
SCREAMING_SNAKE_CASE_ : int = self.val_step_fn(_A,**_A )
running_loss += jax_utils.unreplicate(metrics["loss"] )
i += 1
return running_loss / i
def __UpperCamelCase ( self : Union[str, Any],_A : Union[str, Any],_A : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = jax_utils.unreplicate(_A )
print(F'SAVING CHECKPOINT IN {save_dir}',end=" ... " )
self.model_save_fn(_A,params=state.params )
with open(os.path.join(_A,"opt_state.msgpack" ),"wb" ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args,os.path.join(_A,"args.joblib" ) )
joblib.dump(self.data_collator,os.path.join(_A,"data_collator.joblib" ) )
with open(os.path.join(_A,"training_state.json" ),"w" ) as f:
json.dump({"step": state.step.item()},_A )
print("DONE" )
def _snake_case ( lowerCAmelCase : Any , lowerCAmelCase : Any ):
"""simple docstring"""
print(f'RESTORING CHECKPOINT FROM {save_dir}' , end=" ... " )
with open(os.path.join(lowerCAmelCase , "flax_model.msgpack" ) , "rb" ) as f:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = from_bytes(state.params , f.read() )
with open(os.path.join(lowerCAmelCase , "opt_state.msgpack" ) , "rb" ) as f:
SCREAMING_SNAKE_CASE_ : int = from_bytes(state.opt_state , f.read() )
SCREAMING_SNAKE_CASE_ : str = joblib.load(os.path.join(lowerCAmelCase , "args.joblib" ) )
SCREAMING_SNAKE_CASE_ : Any = joblib.load(os.path.join(lowerCAmelCase , "data_collator.joblib" ) )
with open(os.path.join(lowerCAmelCase , "training_state.json" ) , "r" ) as f:
SCREAMING_SNAKE_CASE_ : Optional[int] = json.load(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Dict = training_state["step"]
print("DONE" )
return params, opt_state, step, args, data_collator
def _snake_case ( lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = num_train_steps - warmup_steps
SCREAMING_SNAKE_CASE_ : str = optax.linear_schedule(init_value=lowerCAmelCase , end_value=lowerCAmelCase , transition_steps=lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Any = optax.linear_schedule(init_value=lowerCAmelCase , end_value=1E-7 , transition_steps=lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
return lr
def _snake_case ( lowerCAmelCase : Dict , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[str] ):
"""simple docstring"""
def weight_decay_mask(lowerCAmelCase : Dict ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = traverse_util.flatten_dict(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] = {k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()}
return traverse_util.unflatten_dict(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = scheduler_fn(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[Any] = optax.adamw(learning_rate=lowerCAmelCase , weight_decay=lowerCAmelCase , mask=lowerCAmelCase )
return tx, lr
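
# scheduler_fn joins two linear ramps at the warmup boundary: up from init_lr
# to lr over warmup_steps, then down toward ~0 for the remaining steps.
# A toy sketch of the same shape (the values below are illustrative only):
_toy_warmup = optax.linear_schedule(init_value=0.0, end_value=3e-5, transition_steps=100)
_toy_decay = optax.linear_schedule(init_value=3e-5, end_value=1e-7, transition_steps=900)
_toy_schedule = optax.join_schedules(schedules=[_toy_warmup, _toy_decay], boundaries=[100])
print(_toy_schedule(0), _toy_schedule(100), _toy_schedule(1000))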
| 18
|
from math import factorial, radians
def _snake_case ( lowerCAmelCase : float , lowerCAmelCase : int = 1_8 , lowerCAmelCase : int = 1_0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
# Converting from degrees to radians
SCREAMING_SNAKE_CASE_ : Tuple = radians(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] = angle_in_radians
SCREAMING_SNAKE_CASE_ : List[str] = 3
SCREAMING_SNAKE_CASE_ : str = -1
for _ in range(lowerCAmelCase ):
result += (b * (angle_in_radians**a)) / factorial(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Dict = -b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(lowerCAmelCase , lowerCAmelCase )
if __name__ == "__main__":
__import__('''doctest''').testmod()
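
# The loop above evaluates the Maclaurin series
# sin(x) = x - x**3/3! + x**5/5! - ... after reducing the angle mod 360 deg.
# A standalone check with a fresh, callable name (illustrative only):
def _maclaurin_sin(deg: float, terms: int = 18) -> float:
    x = radians(deg % 360.0)
    return sum((-1) ** n * x ** (2 * n + 1) / factorial(2 * n + 1) for n in range(terms))

print(round(_maclaurin_sin(30), 10))  # 0.5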
| 18
| 1
|
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowerCAmelCase:
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=1_3 , _lowerCamelCase=3_0 , _lowerCamelCase=2 , _lowerCamelCase=3 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=3_2 , _lowerCamelCase=2 , _lowerCamelCase=4 , _lowerCamelCase=3_7 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=1_0 , _lowerCamelCase=0.0_2 , _lowerCamelCase=3 , _lowerCamelCase=None , ):
UpperCamelCase_: str = parent
UpperCamelCase_: str = batch_size
UpperCamelCase_: int = image_size
UpperCamelCase_: Any = patch_size
UpperCamelCase_: Dict = num_channels
UpperCamelCase_: Dict = is_training
UpperCamelCase_: Any = use_labels
UpperCamelCase_: Optional[int] = hidden_size
UpperCamelCase_: str = num_hidden_layers
UpperCamelCase_: int = num_attention_heads
UpperCamelCase_: Optional[int] = intermediate_size
UpperCamelCase_: List[str] = hidden_act
UpperCamelCase_: Optional[Any] = hidden_dropout_prob
UpperCamelCase_: List[str] = attention_probs_dropout_prob
UpperCamelCase_: Tuple = type_sequence_label_size
UpperCamelCase_: Optional[int] = initializer_range
UpperCamelCase_: Tuple = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase_: Tuple = (image_size // patch_size) ** 2
UpperCamelCase_: List[str] = num_patches + 1
def _a ( self ):
UpperCamelCase_: Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase_: List[Any] = None
if self.use_labels:
UpperCamelCase_: str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase_: List[Any] = self.get_config()
return config, pixel_values, labels
def _a ( self ):
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , )
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
UpperCamelCase_: Union[str, Any] = TFViTModel(config=_lowerCamelCase )
UpperCamelCase_: List[Any] = model(_lowerCamelCase , training=_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image with different size than the one specified in config.
UpperCamelCase_: int = self.image_size // 2
UpperCamelCase_: List[str] = pixel_values[:, :, :image_size, :image_size]
UpperCamelCase_: List[Any] = model(_lowerCamelCase , interpolate_pos_encoding=_lowerCamelCase , training=_lowerCamelCase )
UpperCamelCase_: Optional[int] = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
UpperCamelCase_: Dict = self.type_sequence_label_size
UpperCamelCase_: Tuple = TFViTForImageClassification(_lowerCamelCase )
UpperCamelCase_: Optional[Any] = model(_lowerCamelCase , labels=_lowerCamelCase , training=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# Test with an image with different size than the one specified in config.
UpperCamelCase_: List[Any] = self.image_size // 2
UpperCamelCase_: Union[str, Any] = pixel_values[:, :, :image_size, :image_size]
UpperCamelCase_: int = model(_lowerCamelCase , interpolate_pos_encoding=_lowerCamelCase , training=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase_: Optional[int] = 1
UpperCamelCase_: Tuple = TFViTForImageClassification(_lowerCamelCase )
UpperCamelCase_: str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase_: int = model(_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self ):
UpperCamelCase_: List[str] = self.prepare_config_and_inputs()
UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_: List[Any] = config_and_inputs
UpperCamelCase_: int = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class _lowerCAmelCase( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
a : List[str] =(TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
a : Tuple =(
{'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification}
if is_tf_available()
else {}
)
a : Tuple =False
a : int =False
a : int =False
def _a ( self ):
UpperCamelCase_: Tuple = TFViTModelTester(self )
UpperCamelCase_: List[str] = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase , hidden_size=3_7 )
def _a ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def _a ( self ):
pass
@unittest.skip(reason='ViT does not use inputs_embeds' )
def _a ( self ):
pass
def _a ( self ):
UpperCamelCase_ ,UpperCamelCase_: Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_: Optional[int] = model_class(_lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
UpperCamelCase_: Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCamelCase , tf.keras.layers.Layer ) )
def _a ( self ):
UpperCamelCase_ ,UpperCamelCase_: Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_: Optional[int] = model_class(_lowerCamelCase )
UpperCamelCase_: Optional[int] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase_: List[str] = [*signature.parameters.keys()]
UpperCamelCase_: List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def _a ( self ):
UpperCamelCase_: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def _a ( self ):
UpperCamelCase_: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
@slow
def _a ( self ):
UpperCamelCase_: int = TFViTModel.from_pretrained('google/vit-base-patch16-224' )
self.assertIsNotNone(_lowerCamelCase )
def snake_case () -> Optional[int]:
UpperCamelCase_: Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class _lowerCAmelCase( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self ):
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None
@slow
def _a ( self ):
UpperCamelCase_: str = TFViTForImageClassification.from_pretrained('google/vit-base-patch16-224' )
UpperCamelCase_: str = self.default_image_processor
UpperCamelCase_: Any = prepare_img()
UpperCamelCase_: Optional[Any] = image_processor(images=_lowerCamelCase , return_tensors='tf' )
# forward pass
UpperCamelCase_: Union[str, Any] = model(**_lowerCamelCase )
# verify the logits
UpperCamelCase_: List[Any] = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
UpperCamelCase_: str = tf.constant([-0.2_7_4_4, 0.8_2_1_5, -0.0_8_3_6] )
tf.debugging.assert_near(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 )
| 292
|
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    return x.sum()
def add_one(i):  # picklable for multiprocessing
    return i + 1
@dataclass
class _lowerCAmelCase:
"""simple docstring"""
a : int
a : str
class _lowerCAmelCase( UpperCAmelCase_ ):
"""simple docstring"""
def _a ( self ):
UpperCamelCase_: Optional[Any] = {}
UpperCamelCase_: List[str] = []
UpperCamelCase_: Any = 1
UpperCamelCase_: Optional[int] = [1, 2]
UpperCamelCase_: List[str] = {'a': 1, 'b': 2}
UpperCamelCase_: Tuple = {'a': [1, 2], 'b': [3, 4]}
UpperCamelCase_: Optional[int] = {'a': {'1': 1}, 'b': 2}
UpperCamelCase_: Optional[Any] = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
UpperCamelCase_: Tuple = {}
UpperCamelCase_: str = []
UpperCamelCase_: List[Any] = 2
UpperCamelCase_: List[Any] = [2, 3]
UpperCamelCase_: Optional[Any] = {'a': 2, 'b': 3}
UpperCamelCase_: List[str] = {'a': [2, 3], 'b': [4, 5]}
UpperCamelCase_: Any = {'a': {'1': 2}, 'b': 3}
UpperCamelCase_: List[str] = {'a': 2, 'b': 3, 'c': 4, 'd': 5}
self.assertEqual(map_nested(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(map_nested(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(map_nested(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(map_nested(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(map_nested(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(map_nested(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(map_nested(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(map_nested(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase )
UpperCamelCase_: Optional[int] = 2
self.assertEqual(map_nested(_lowerCamelCase , _lowerCamelCase , num_proc=_lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(map_nested(_lowerCamelCase , _lowerCamelCase , num_proc=_lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(map_nested(_lowerCamelCase , _lowerCamelCase , num_proc=_lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(map_nested(_lowerCamelCase , _lowerCamelCase , num_proc=_lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(map_nested(_lowerCamelCase , _lowerCamelCase , num_proc=_lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(map_nested(_lowerCamelCase , _lowerCamelCase , num_proc=_lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(map_nested(_lowerCamelCase , _lowerCamelCase , num_proc=_lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(map_nested(_lowerCamelCase , _lowerCamelCase , num_proc=_lowerCamelCase ) , _lowerCamelCase )
UpperCamelCase_: Tuple = {'a': np.eye(2 ), 'b': np.zeros(3 ), 'c': np.ones(2 )}
UpperCamelCase_: Tuple = {'a': 2, 'b': 0, 'c': 2}
UpperCamelCase_: str = {
'a': np.eye(2 ).astype(_lowerCamelCase ),
'b': np.zeros(3 ).astype(_lowerCamelCase ),
'c': np.ones(2 ).astype(_lowerCamelCase ),
}
self.assertEqual(map_nested(_lowerCamelCase , _lowerCamelCase , map_numpy=_lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(_lowerCamelCase , _lowerCamelCase , map_numpy=_lowerCamelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(_lowerCamelCase , _lowerCamelCase , map_numpy=_lowerCamelCase , num_proc=_lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(_lowerCamelCase , _lowerCamelCase , map_numpy=_lowerCamelCase , num_proc=_lowerCamelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(_lowerCamelCase ): # can't pickle a local lambda
map_nested(lambda _lowerCamelCase : x + 1 , _lowerCamelCase , num_proc=_lowerCamelCase )
def _a ( self ):
UpperCamelCase_: Optional[Any] = {'a': 1, 'b': 2}
UpperCamelCase_: Dict = {'a': 3, 'b': 4}
UpperCamelCase_: Optional[int] = {'a': 5, 'b': 6}
UpperCamelCase_: int = sorted([('a', (1, 3, 5)), ('b', (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ) , _lowerCamelCase )
def _a ( self ):
class _lowerCAmelCase:
"""simple docstring"""
a : str ='''bar'''
UpperCamelCase_: int = Foo()
self.assertEqual(foo.my_attr , 'bar' )
with temporary_assignment(_lowerCamelCase , 'my_attr' , 'BAR' ):
self.assertEqual(foo.my_attr , 'BAR' )
self.assertEqual(foo.my_attr , 'bar' )
@pytest.mark.parametrize(
'iterable_length, num_proc, expected_num_proc' , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(1_6, 1_6, 1_6),
(1_6, 1_7, 1_6),
(1_7, 1_6, 1_6),
] , )
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> Dict:
with patch('datasets.utils.py_utils._single_map_nested' ) as mock_single_map_nested, patch(
'datasets.parallel.parallel.Pool' ) as mock_multiprocessing_pool:
UpperCamelCase_: Any = {F'''{i}''': i for i in range(UpperCAmelCase__ )}
UpperCamelCase_: int = map_nested(lambda UpperCAmelCase__ : x + 1_0 , UpperCAmelCase__ , num_proc=UpperCAmelCase__ , parallel_min_length=1_6 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class _lowerCAmelCase( UpperCAmelCase_ ):
"""simple docstring"""
@require_tf
def _a ( self ):
import tensorflow as tf
from tensorflow.keras import layers
UpperCamelCase_: Dict = layers.Dense(2 )
def gen_random_output():
UpperCamelCase_: Optional[Any] = tf.random.uniform((1, 3) )
return model(_lowerCamelCase ).numpy()
with temp_seed(4_2 , set_tensorflow=_lowerCamelCase ):
UpperCamelCase_: int = gen_random_output()
with temp_seed(4_2 , set_tensorflow=_lowerCamelCase ):
UpperCamelCase_: List[str] = gen_random_output()
UpperCamelCase_: str = gen_random_output()
np.testing.assert_equal(_lowerCamelCase , _lowerCamelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
def _a ( self ):
import torch
def gen_random_output():
UpperCamelCase_: Any = torch.nn.Linear(3 , 2 )
UpperCamelCase_: Optional[Any] = torch.rand(1 , 3 )
return model(_lowerCamelCase ).detach().numpy()
with temp_seed(4_2 , set_pytorch=_lowerCamelCase ):
UpperCamelCase_: Dict = gen_random_output()
with temp_seed(4_2 , set_pytorch=_lowerCamelCase ):
UpperCamelCase_: str = gen_random_output()
UpperCamelCase_: str = gen_random_output()
np.testing.assert_equal(_lowerCamelCase , _lowerCamelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
def _a ( self ):
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(4_2 ):
UpperCamelCase_: Optional[Any] = gen_random_output()
with temp_seed(4_2 ):
UpperCamelCase_: Tuple = gen_random_output()
UpperCamelCase_: Optional[int] = gen_random_output()
np.testing.assert_equal(_lowerCamelCase , _lowerCamelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize('input_data' , [{}] )
def snake_case (UpperCAmelCase__ ) -> Dict:
UpperCamelCase_: str = NestedDataStructure(UpperCAmelCase__ ).data
assert output_data == input_data
@pytest.mark.parametrize(
'data, expected_output' , [
({}, []),
([], []),
('foo', ['foo']),
(['foo', 'bar'], ['foo', 'bar']),
([['foo', 'bar']], ['foo', 'bar']),
([[['foo'], ['bar']]], ['foo', 'bar']),
([[['foo'], 'bar']], ['foo', 'bar']),
({'a': 1, 'b': 2}, [1, 2]),
({'a': [1, 2], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[1, 2]], 'b': [[3, 4]]}, [1, 2, 3, 4]),
({'a': [[1, 2]], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [[[3], [4]]]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [[3, 4]]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [3, [4]]}, [1, 2, 3, 4]),
({'a': {'1': 1}, 'b': 2}, [1, 2]),
({'a': {'1': [1]}, 'b': 2}, [1, 2]),
({'a': {'1': [1]}, 'b': [2]}, [1, 2]),
] , )
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ ) -> Dict:
UpperCamelCase_: Optional[Any] = NestedDataStructure(UpperCAmelCase__ ).flatten()
assert output == expected_output
def snake_case () -> Optional[int]:
UpperCamelCase_: List[Any] = A(x=1 , y='foobar' )
UpperCamelCase_: Optional[int] = {'x': 1, 'y': 'foobar'}
assert asdict(UpperCAmelCase__ ) == expected_output
UpperCamelCase_: List[str] = {'a': {'b': A(x=1_0 , y='foo' )}, 'c': [A(x=2_0 , y='bar' )]}
UpperCamelCase_: Tuple = {'a': {'b': {'x': 1_0, 'y': 'foo'}}, 'c': [{'x': 2_0, 'y': 'bar'}]}
assert asdict(UpperCAmelCase__ ) == expected_output
with pytest.raises(UpperCAmelCase__ ):
asdict([1, A(x=1_0 , y='foo' )] )
def snake_case (UpperCAmelCase__ ) -> Optional[Any]:
return text.split()
def snake_case (UpperCAmelCase__ ) -> str:
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def snake_case () -> Union[str, Any]:
with Pool(2 ) as pool:
UpperCamelCase_: Optional[Any] = list(iflatmap_unordered(UpperCAmelCase__ , _split_text , kwargs_iterable=[{'text': 'hello there'}] * 1_0 ) )
assert out.count('hello' ) == 1_0
assert out.count('there' ) == 1_0
assert len(UpperCAmelCase__ ) == 2_0
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
UpperCamelCase_: Optional[int] = list(iflatmap_unordered(UpperCAmelCase__ , _split_text , kwargs_iterable=[{'text': 'hello there'}] * 1_0 ) )
assert out.count('hello' ) == 1_0
assert out.count('there' ) == 1_0
assert len(UpperCAmelCase__ ) == 2_0
# check that we get items as fast as possible
with Pool(2 ) as pool:
UpperCamelCase_: Any = []
for yield_time, content in iflatmap_unordered(
UpperCAmelCase__ , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{'content': 'a'}, {'content': 'b'}] ):
                assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
out.append(UpperCAmelCase__ )
assert out.count('a' ) == 2
assert out.count('b' ) == 2
assert len(UpperCAmelCase__ ) == 4
| 292
| 1
|
'''simple docstring'''
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def solution():
    script_dir = os.path.dirname(os.path.realpath(__file__))
    wordfile_path = os.path.join(script_dir, "words.txt")
    words = ""
    with open(wordfile_path) as f:
        words = f.readline()
    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
if __name__ == "__main__":
print(solution())
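A hedged worked example of the scoring rule above: "SKY" maps to 19 + 11 + 25 = 55, the 10th triangular number, so it counts as a triangle word.
word = "SKY"
value = sum(ord(ch) - 64 for ch in word)  # S=19, K=11, Y=25
assert value == 55 and value in TRIANGULAR_NUMBERS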
| 164
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
__A = False
class A ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
def A__ ( self ) -> Any:
'''simple docstring'''
lowercase__ = VersatileDiffusionImageVariationPipeline.from_pretrained("""shi-labs/versatile-diffusion""" )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
lowercase__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
lowercase__ = torch.manual_seed(0 )
lowercase__ = pipe(
image=lowerCamelCase__ , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
lowercase__ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.04_41, 0.04_69, 0.05_07, 0.05_75, 0.06_32, 0.06_50, 0.08_65, 0.09_09, 0.09_45] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 164
| 1
|
import os
def solution() -> int:
    """Find the greatest product of four adjacent numbers in the 20x20 grid."""
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])
    maximum = 0
    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp
    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp
    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp
    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
print(solution())
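The four scan loops above can be collapsed with direction vectors; this is an illustrative sketch (not the original author's code) that computes the same maximum for any square grid:
def max_product_in_grid(grid, run=4):
    n = len(grid)
    best = 0
    for di, dj in ((0, 1), (1, 0), (1, 1), (1, -1)):  # right, down, two diagonals
        for i in range(n):
            for j in range(n):
                if 0 <= i + (run - 1) * di < n and 0 <= j + (run - 1) * dj < n:
                    product = 1
                    for k in range(run):
                        product *= grid[i + k * di][j + k * dj]
                    best = max(best, product)
    return best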
| 71
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"
    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(F'''Column {self.audio_column} is not present in features.''')
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(F'''Column {self.audio_column} is not an Audio type.''')
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template
    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
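A hedged usage sketch for the task template above, assuming the reconstructed class name and the public datasets feature types (the import path and sampling rate are illustrative):
from datasets import Audio, Features, Value
features = Features({"audio": Audio(sampling_rate=16_000), "transcription": Value("string")})
template = AutomaticSpeechRecognition()
aligned = template.align_with_features(features)
# the aligned template now carries the dataset's concrete Audio feature
assert aligned.input_schema["audio"].sampling_rate == 16_000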
| 71
| 1
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
A: int = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> None:
'''simple docstring'''
warnings.warn(
"""The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use ChineseCLIPImageProcessor instead.""" , _SCREAMING_SNAKE_CASE , )
super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
| 109
|
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a =['image_processor', 'tokenizer']
__a ='OwlViTImageProcessor'
__a =('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self : List[Any] , __a : str=None , __a : List[str]=None , **__a : List[Any] ):
_a = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __a , )
_a = kwargs.pop("feature_extractor" )
_a = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__a , __a )
def __call__( self : Union[str, Any] , __a : Any=None , __a : List[str]=None , __a : int=None , __a : Optional[int]="max_length" , __a : List[str]="np" , **__a : Any ):
if text is None and query_images is None and images is None:
raise ValueError(
"You have to specify at least one text or query image or image. All three cannot be none." )
if text is not None:
if isinstance(__a , __a ) or (isinstance(__a , __a ) and not isinstance(text[0] , __a )):
_a = [self.tokenizer(__a , padding=__a , return_tensors=__a , **__a )]
elif isinstance(__a , __a ) and isinstance(text[0] , __a ):
_a = []
# Maximum number of queries across batch
_a = max([len(__a ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(__a ) != max_num_queries:
_a = t + [" "] * (max_num_queries - len(__a ))
_a = self.tokenizer(__a , padding=__a , return_tensors=__a , **__a )
encodings.append(__a )
else:
raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
if return_tensors == "np":
_a = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
_a = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
_a = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
_a = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
_a = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
_a = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
_a = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
_a = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
else:
raise ValueError("Target return tensor type could not be returned" )
_a = BatchEncoding()
_a = input_ids
_a = attention_mask
if query_images is not None:
_a = BatchEncoding()
_a = self.image_processor(
__a , return_tensors=__a , **__a ).pixel_values
_a = query_pixel_values
if images is not None:
_a = self.image_processor(__a , return_tensors=__a , **__a )
if text is not None and images is not None:
_a = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
_a = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**__a ) , tensor_type=__a )
def UpperCamelCase__ ( self : List[str] , *__a : Union[str, Any] , **__a : int ):
return self.image_processor.post_process(*__a , **__a )
def UpperCamelCase__ ( self : Optional[int] , *__a : Optional[Any] , **__a : List[str] ):
return self.image_processor.post_process_object_detection(*__a , **__a )
def UpperCamelCase__ ( self : Optional[Any] , *__a : Dict , **__a : Union[str, Any] ):
return self.image_processor.post_process_image_guided_detection(*__a , **__a )
def UpperCamelCase__ ( self : str , *__a : Tuple , **__a : Tuple ):
return self.tokenizer.batch_decode(*__a , **__a )
def UpperCamelCase__ ( self : List[str] , *__a : List[Any] , **__a : Optional[int] ):
return self.tokenizer.decode(*__a , **__a )
@property
def UpperCamelCase__ ( self : List[str] ):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __a , )
return self.image_processor_class
@property
def UpperCamelCase__ ( self : str ):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __a , )
return self.image_processor
| 63
| 0
|
'''simple docstring'''
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday))
if __name__ == "__main__":
    for year in (1_994, 2_000, 2_010, 2_021, 2_023):
        tense = '''will be''' if year > datetime.now().year else '''was'''
        print(f"""Easter in {year} {tense} {gauss_easter(year)}""")
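A hedged sanity check against two widely documented dates (Western Easter fell on April 4, 2021 and April 9, 2023):
assert gauss_easter(2021) == datetime(2021, 4, 4)
assert gauss_easter(2023) == datetime(2023, 4, 9)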
| 228
|
'''simple docstring'''
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    array_1 = [float(x) for x in input('''Enter the elements of first array: ''').split()]
    array_2 = [float(x) for x in input('''Enter the elements of second array: ''').split()]
    print(f"""The median of two arrays is: {median_of_two_arrays(array_1, array_2)}""")
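A hedged worked example: [1, 3] and [2] merge to [1, 2, 3] (odd length, so the middle element), while [1, 2] and [3, 4] merge to an even-length list whose two middle elements are averaged.
assert median_of_two_arrays([1, 3], [2]) == 2
assert median_of_two_arrays([1, 2], [3, 4]) == 2.5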
| 228
| 1
|
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f'''If set, {key} must be yes or no.''')
    return _value
UpperCamelCase = parse_flag_from_env('''RUN_SLOW''', default=False)
UpperCamelCase = parse_flag_from_env('''RUN_REMOTE''', default=False)
UpperCamelCase = parse_flag_from_env('''RUN_LOCAL''', default=True)
UpperCamelCase = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
UpperCamelCase = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
UpperCamelCase = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
UpperCamelCase = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
UpperCamelCase = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
UpperCamelCase = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
UpperCamelCase = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('''0.3.2'''),
reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
UpperCamelCase = pytest.mark.skipif(
sys.platform == '''win32''',
reason='''test should not be run on Windows''',
)
def lowercase_ ( _lowerCamelCase : int):
try:
import faiss # noqa
except ImportError:
lowercase__ : Optional[Any] = unittest.skip("test requires faiss")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : int):
try:
import regex # noqa
except ImportError:
lowercase__ : List[Any] = unittest.skip("test requires regex")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : int):
try:
import elasticsearch # noqa
except ImportError:
lowercase__ : Optional[int] = unittest.skip("test requires elasticsearch")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
try:
import sqlalchemy # noqa
except ImportError:
lowercase__ : Optional[int] = unittest.skip("test requires sqlalchemy")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : int):
if not config.TORCH_AVAILABLE:
lowercase__ : Tuple = unittest.skip("test requires PyTorch")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : Tuple):
if not config.TF_AVAILABLE:
lowercase__ : Any = unittest.skip("test requires TensorFlow")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : Dict):
if not config.JAX_AVAILABLE:
lowercase__ : List[str] = unittest.skip("test requires JAX")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : int):
if not config.PIL_AVAILABLE:
lowercase__ : Dict = unittest.skip("test requires Pillow")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : Tuple):
try:
import transformers # noqa F401
except ImportError:
return unittest.skip("test requires transformers")(_lowerCamelCase)
else:
return test_case
def lowercase_ ( _lowerCamelCase : Optional[Any]):
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip("test requires tiktoken")(_lowerCamelCase)
else:
return test_case
def lowercase_ ( _lowerCamelCase : Dict):
try:
import spacy # noqa F401
except ImportError:
return unittest.skip("test requires spacy")(_lowerCamelCase)
else:
return test_case
def lowercase_ ( _lowerCamelCase : Optional[int]):
def _require_spacy_model(_lowerCamelCase : Optional[int]):
try:
import spacy # noqa F401
spacy.load(_lowerCamelCase)
except ImportError:
return unittest.skip("test requires spacy")(_lowerCamelCase)
except OSError:
return unittest.skip("test requires spacy model '{}'".format(_lowerCamelCase))(_lowerCamelCase)
else:
return test_case
return _require_spacy_model
def lowercase_ ( _lowerCamelCase : Dict):
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip("test requires pyspark")(_lowerCamelCase)
else:
return test_case
def lowercase_ ( _lowerCamelCase : List[str]):
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip("test requires joblibspark")(_lowerCamelCase)
else:
return test_case
def lowercase_ ( _lowerCamelCase : Dict):
if not _run_slow_tests or _run_slow_tests == 0:
lowercase__ : Tuple = unittest.skip("test is slow")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : int):
if not _run_local_tests or _run_local_tests == 0:
lowercase__ : str = unittest.skip("test is local")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : Optional[int]):
if not _run_packaged_tests or _run_packaged_tests == 0:
lowercase__ : List[Any] = unittest.skip("test is packaged")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : Tuple):
if not _run_remote_tests or _run_remote_tests == 0:
lowercase__ : Union[str, Any] = unittest.skip("test requires remote")(_lowerCamelCase)
return test_case
def lowercase_ ( *_lowerCamelCase : str):
def decorate(cls : str):
for name, fn in cls.__dict__.items():
if callable(_lowerCamelCase) and name.startswith("test"):
for decorator in decorators:
lowercase__ : Optional[int] = decorator(_lowerCamelCase)
setattr(cls , _lowerCamelCase , _lowerCamelCase)
return cls
return decorate
class RequestWouldHangIndefinitelyError(Exception):
    pass
class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request
    def timeout_request(method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.''')
        kwargs["timeout"] = timeout
        try:
            return online_request(method, url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f'''OfflineMock[{url}]'''),)
            e.args = (max_retry_error,)
            raise
    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)
    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def lowercase_ ( ):
import gc
gc.collect()
lowercase__ : Union[str, Any] = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def lowercase_ ( ):
import gc
gc.collect()
lowercase__ : int = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()
def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError
    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err
    return decorator.decorator(_wrapper, func)
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env, )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="stderr:")),
        ], timeout=timeout, )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))
    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
            f'''The combined stderr from workers follows:\n{stderr}''')
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f'''\'{cmd_str}\' produced no output.''')
    return result
def pytest_xdist_worker_id():
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(R"^gw", "", worker, 0, re.M)
    return int(worker)
def get_torch_dist_unique_port():
    port = 2_9500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
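A hedged usage sketch for the offline() context manager reconstructed above (the target URL is illustrative):
with offline(OfflineSimulationMode.CONNECTION_FAILS):
    try:
        requests.get("https://huggingface.co")
    except requests.ConnectionError as err:
        print("offline mode intercepted the call:", err)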
| 87
|
import argparse
import torch
from torch import nn
from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''decoder.output_projection.weight''',
        '''_float_tensor''',
        '''encoder.embed_positions._float_tensor''',
        '''decoder.embed_positions._float_tensor''',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_s2t_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    mam_aaa = torch.load(checkpoint_path, map_location='''cpu''')
    args = mam_aaa['''args''']
    state_dict = mam_aaa['''model''']
    lm_head_weights = state_dict['''decoder.output_projection.weight''']
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    vocab_size = state_dict['''decoder.embed_tokens.weight'''].shape[0]
    tie_embeds = args.share_decoder_input_output_embed
    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(''',''')]
    config = Speech2TextConfig(
        vocab_size=vocab_size, max_source_positions=args.max_source_positions, max_target_positions=args.max_target_positions, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function='''relu''', num_conv_layers=len(conv_kernel_sizes), conv_channels=args.conv_channels, conv_kernel_sizes=conv_kernel_sizes, input_feat_per_channel=args.input_feat_per_channel, input_channels=args.input_channels, tie_word_embeddings=tie_embeds, num_beams=5, max_length=200, use_cache=True, decoder_start_token_id=2, early_stopping=True, )
    model = Speech2TextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            '''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'''
            f" but all the following weights are missing {missing}" )
    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.')
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    convert_fairseq_s2t_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
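A hedged usage sketch for the reconstructed conversion function above (both paths are placeholders, not real files):
convert_fairseq_s2t_checkpoint_to_tfms("/path/to/s2t_checkpoint.pt", "./s2t-converted")
# afterwards the dump can be reloaded with, e.g.:
# model = Speech2TextForConditionalGeneration.from_pretrained("./s2t-converted")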
| 142
| 0
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class a_ ( unittest.TestCase ):
def lowercase__ ( self : Optional[int] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowercase__ ( self : int ):
"""simple docstring"""
lowercase_ :int = 1
lowercase_ :List[str] = 3
lowercase_ :List[Any] = (32, 32)
lowercase_ :Optional[int] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowercase )
return image
@property
def lowercase__ ( self : Any ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase_ :str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
def lowercase__ ( self : List[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase_ :List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def lowercase__ ( self : Union[str, Any] ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase_ :str = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_006 , )
return RobertaSeriesModelWithTransformation(lowercase )
@property
def lowercase__ ( self : int ):
"""simple docstring"""
def extract(*lowercase : int , **lowercase : List[Any] ):
class a_ :
def __init__( self : str ):
"""simple docstring"""
lowercase_ :List[Any] = torch.ones([0] )
def lowercase__ ( self : List[str] , lowercase : str ):
"""simple docstring"""
self.pixel_values.to(lowercase )
return self
return Out()
return extract
def lowercase__ ( self : Dict ):
"""simple docstring"""
lowercase_ :Dict = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase_ :Dict = self.dummy_cond_unet
lowercase_ :List[str] = PNDMScheduler(skip_prk_steps=lowercase )
lowercase_ :Any = self.dummy_vae
lowercase_ :Union[str, Any] = self.dummy_text_encoder
lowercase_ :int = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
lowercase_ :List[str] = 77
lowercase_ :List[Any] = self.dummy_image.to(lowercase )
lowercase_ :Dict = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
lowercase_ :Union[str, Any] = AltDiffusionImgaImgPipeline(
unet=lowercase , scheduler=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , safety_checker=lowercase , feature_extractor=self.dummy_extractor , )
lowercase_ :List[str] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowercase )
lowercase_ :str = alt_pipe.to(lowercase )
alt_pipe.set_progress_bar_config(disable=lowercase )
lowercase_ :Optional[Any] = "A painting of a squirrel eating a burger"
lowercase_ :Any = torch.Generator(device=lowercase ).manual_seed(0 )
lowercase_ :int = alt_pipe(
[prompt] , generator=lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=lowercase , )
lowercase_ :Union[str, Any] = output.images
lowercase_ :Tuple = torch.Generator(device=lowercase ).manual_seed(0 )
lowercase_ :str = alt_pipe(
[prompt] , generator=lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=lowercase , return_dict=lowercase , )[0]
lowercase_ :Optional[int] = image[0, -3:, -3:, -1]
lowercase_ :Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase_ :List[str] = np.array([0.44_27, 0.37_31, 0.42_49, 0.49_41, 0.45_46, 0.41_48, 0.41_93, 0.46_66, 0.44_99] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def lowercase__ ( self : Union[str, Any] ):
"""simple docstring"""
lowercase_ :Any = self.dummy_cond_unet
lowercase_ :List[Any] = PNDMScheduler(skip_prk_steps=lowercase )
lowercase_ :Optional[int] = self.dummy_vae
lowercase_ :str = self.dummy_text_encoder
lowercase_ :List[str] = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
lowercase_ :int = 77
lowercase_ :Optional[Any] = self.dummy_image.to(lowercase )
# put models in fp16
lowercase_ :List[str] = unet.half()
lowercase_ :Any = vae.half()
lowercase_ :List[str] = bert.half()
# make sure here that pndm scheduler skips prk
lowercase_ :Union[str, Any] = AltDiffusionImgaImgPipeline(
unet=lowercase , scheduler=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , safety_checker=lowercase , feature_extractor=self.dummy_extractor , )
lowercase_ :Optional[Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowercase )
lowercase_ :Tuple = alt_pipe.to(lowercase )
alt_pipe.set_progress_bar_config(disable=lowercase )
lowercase_ :Optional[Any] = "A painting of a squirrel eating a burger"
lowercase_ :Any = torch.manual_seed(0 )
lowercase_ :Any = alt_pipe(
[prompt] , generator=lowercase , num_inference_steps=2 , output_type="np" , image=lowercase , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def lowercase__ ( self : Any ):
"""simple docstring"""
lowercase_ :Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
# resize to resolution that is divisible by 8 but not 16 or 32
lowercase_ :int = init_image.resize((760, 504) )
lowercase_ :Union[str, Any] = "BAAI/AltDiffusion"
lowercase_ :Union[str, Any] = AltDiffusionImgaImgPipeline.from_pretrained(
lowercase , safety_checker=lowercase , )
pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
pipe.enable_attention_slicing()
lowercase_ :Union[str, Any] = "A fantasy landscape, trending on artstation"
lowercase_ :str = torch.manual_seed(0 )
lowercase_ :Dict = pipe(
prompt=lowercase , image=lowercase , strength=0.75 , guidance_scale=7.5 , generator=lowercase , output_type="np" , )
lowercase_ :Dict = output.images[0]
lowercase_ :Any = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
lowercase_ :List[Any] = np.array([0.93_58, 0.93_97, 0.95_99, 0.99_01, 1.00_00, 1.00_00, 0.98_82, 1.00_00, 1.00_00] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
def lowercase__ ( self : Optional[Any] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Optional[int] ):
"""simple docstring"""
lowercase_ :Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
lowercase_ :Union[str, Any] = init_image.resize((768, 512) )
lowercase_ :str = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
lowercase_ :Union[str, Any] = "BAAI/AltDiffusion"
lowercase_ :Tuple = AltDiffusionImgaImgPipeline.from_pretrained(
lowercase , safety_checker=lowercase , )
pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
pipe.enable_attention_slicing()
lowercase_ :Tuple = "A fantasy landscape, trending on artstation"
lowercase_ :Dict = torch.manual_seed(0 )
lowercase_ :List[str] = pipe(
prompt=lowercase , image=lowercase , strength=0.75 , guidance_scale=7.5 , generator=lowercase , output_type="np" , )
lowercase_ :Dict = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2
| 147
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase : int ={
'''configuration_clip''': [
'''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPConfig''',
'''CLIPOnnxConfig''',
'''CLIPTextConfig''',
'''CLIPVisionConfig''',
],
'''processing_clip''': ['''CLIPProcessor'''],
'''tokenization_clip''': ['''CLIPTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Any =['''CLIPTokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : str =['''CLIPFeatureExtractor''']
lowerCAmelCase : Optional[int] =['''CLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Any =[
'''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPModel''',
'''CLIPPreTrainedModel''',
'''CLIPTextModel''',
'''CLIPTextModelWithProjection''',
'''CLIPVisionModel''',
'''CLIPVisionModelWithProjection''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Union[str, Any] =[
'''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCLIPModel''',
'''TFCLIPPreTrainedModel''',
'''TFCLIPTextModel''',
'''TFCLIPVisionModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : List[Any] =[
'''FlaxCLIPModel''',
'''FlaxCLIPPreTrainedModel''',
'''FlaxCLIPTextModel''',
'''FlaxCLIPTextPreTrainedModel''',
'''FlaxCLIPVisionModel''',
'''FlaxCLIPVisionPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
lowerCAmelCase : int =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 147
| 1
|
"""simple docstring"""
encode_dict = {
'a': 'AAAAA',
'b': 'AAAAB',
'c': 'AAABA',
'd': 'AAABB',
'e': 'AABAA',
'f': 'AABAB',
'g': 'AABBA',
'h': 'AABBB',
'i': 'ABAAA',
'j': 'BBBAA',
'k': 'ABAAB',
'l': 'ABABA',
'm': 'ABABB',
'n': 'ABBAA',
'o': 'ABBAB',
'p': 'ABBBA',
'q': 'ABBBB',
'r': 'BAAAA',
's': 'BAAAB',
't': 'BAABA',
'u': 'BAABB',
'v': 'BBBAB',
'w': 'BABAA',
'x': 'BABAB',
'y': 'BABBA',
'z': 'BABBB',
' ': ' ',
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode(word: str) -> str:
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded
def decode(coded: str) -> str:
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
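    # Round-trip sanity check of the Baconian cipher above:
    cipher_text = encode("hello world")
    print(cipher_text)  # AABBBAABAAABABAABABAABBAB BABAAABBABBAAAAABABAAAABB
    assert decode(cipher_text) == "hello world"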
| 292
|
"""simple docstring"""
from math import isqrt, log2
def calculate_prime_numbers(max_number: int) -> list[int]:
    """Returns the prime numbers below max_number via a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]
def solution(base: int = 800_800, degree: int = 800_800) -> int:
    """Counts hybrid-integers p**q * q**p (p != q prime) not exceeding base**degree."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count
if __name__ == "__main__":
print(F"""{solution() = }""")
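    # The search works entirely in log space: p**q * q**p <= n**n exactly when
    # q*log2(p) + p*log2(q) <= n*log2(n), which avoids astronomically large
    # integers. A tiny check of that equivalence:
    p, q, n = 2, 3, 5
    assert (p**q * q**p <= n**n) == (q * log2(p) + p * log2(q) <= n * log2(n))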
| 292
| 1
|
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer
TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"
if is_tf_available():
    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs
@require_tf
@require_keras_nlp
class GPTTokenizationTest(unittest.TestCase):
    # The TF tokenizers are usually used as pretrained tokenizers from existing
    # model checkpoints, so that's what we focus on here.

    def setUp(self):
        super().setUp()
        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))
    def test_tokenizer(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
            loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
            # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
            self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)
                out_length = out["input_ids"].numpy().shape[1]
                assert out_length == max_length
| 139
|
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
logger = logging.get_logger(__name__)


class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 139
| 1
|
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """Replaces the key by subtracting the offset from the original block number."""
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset

    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
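# Worked example of the two helpers above (hypothetical checkpoint key, with
# patch_emb_offset == 1 at that point in the loop):
#
#     replace_key_with_offset("poolformer.encoder.2.3.mlp.fc1.weight", 1, "mlp.fc1", "output.conv1")
#     -> "poolformer.encoder.block.1.3.output.conv1.weight"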
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''poolformer_s12''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
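    # Example invocation (the script filename and paths are placeholders for a
    # locally downloaded PoolFormer checkpoint and an output directory):
    #   python convert_poolformer_original_to_pytorch.py \
    #       --model_name poolformer_s12 \
    #       --checkpoint_path ./poolformer_s12.pth.tar \
    #       --pytorch_dump_folder_path ./poolformer_s12_hf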
| 71
|
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
        GPT2Config,
        T5Config,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
        TFGPT2LMHeadModel,
TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
        AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
        GPT2LMHeadModel,
RobertaForMaskedLM,
        T5ForConditionalGeneration,
)
@is_pt_tf_cross_test
class TFPTAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModel.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertModel)

            model = AutoModel.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForPreTraining.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForPreTraining)

            model = AutoModelForPreTraining.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)

            model = TFAutoModelForCausalLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)

            model = AutoModelForCausalLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, GPT2LMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelWithLMHead.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelForMaskedLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)

            model = AutoModelForSeq2SeqLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, T5ForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

            model = AutoModelForSequenceClassification.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

            model = AutoModelForQuestionAnswering.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForQuestionAnswering)

    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
| 71
| 1
|
"""simple docstring"""
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    # Get the next row in the current board (possible_board) to fill with a queen
    row = len(possible_board)
# If row is equal to the size of the board it means there are a queen in each row in
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append([""". """ * i + """Q """ + """. """ * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
    # We iterate over each column in the row to find all possible results in each row
    for col in range(n):
        # We apply what we learned previously. First we check that in the current
        # board (possible_board) there is no other queen in the same column, because
        # that would mean a vertical collision. Then we apply the two formulas we
        # learned before:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # And we verify that the results of these two formulas do not exist in their
        # respective variables (diagonal_right_collisions, diagonal_left_collisions).
        #
        # If any of these are True it means there is a collision, so we continue to
        # the next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )
def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)
# Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
| 368
|
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
@torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=16_000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate
        ).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)

        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
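# Example usage (a sketch only): community pipelines like this one are usually
# loaded through DiffusionPipeline.from_pretrained with `custom_pipeline`. The
# checkpoint names are assumptions, and `audio` is a placeholder waveform.
if __name__ == "__main__":
    import numpy as np
    from transformers import WhisperForConditionalGeneration, WhisperProcessor

    speech_model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small")
    speech_processor = WhisperProcessor.from_pretrained("openai/whisper-small")
    pipe = DiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4",
        custom_pipeline="speech_to_image_diffusion",
        speech_model=speech_model,
        speech_processor=speech_processor,
    )
    audio = np.zeros(16_000, dtype=np.float32)  # placeholder: one second of silence
    image = pipe(audio, sampling_rate=16_000).images[0]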
| 168
| 0
|
def fizz_buzz(number: int, iterations: int) -> str:
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)

        # print(out)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
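    # Example: one classic round of FizzBuzz from 1 through 15.
    print(fizz_buzz(1, 15))  # 1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz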
| 228
|
def sum_digits(num: int) -> int:
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    pre_numerator = 1
    cur_numerator = 2

    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp

    return sum_digits(cur_numerator)
if __name__ == "__main__":
print(F'{solution() = }')
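    # Known from the Project Euler 65 statement: the numerator of the 10th
    # convergent of the continued fraction of e is 1457, and 1 + 4 + 5 + 7 = 17.
    assert solution(10) == 17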
| 228
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mra'] = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 361
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
| 251
| 0
|
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
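# For intuition, an illustrative reimplementation of the predicate these tests
# exercise (NOT the actual diffusers function): every PyTorch ".bin" weight
# must have a ".safetensors" counterpart. A `variant` such as "fp16" appears
# as an infix, and transformers-style "pytorch_model.bin" pairs with
# "model.safetensors".
import re


def is_safetensors_compatible_sketch(filenames, variant=None):
    def stem(name: str) -> str:
        name = re.sub(r"\.(bin|safetensors)$", "", name)
        if variant is not None and name.endswith(f".{variant}"):
            name = name[: -len(variant) - 1]  # drop the ".fp16"-style infix
        # unify the transformers naming difference between the two formats
        return name.replace("pytorch_model", "model")

    bins = {stem(n) for n in filenames if n.endswith(".bin")}
    safes = {stem(n) for n in filenames if n.endswith(".safetensors")}
    return bins.issubset(safes)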
| 147
|
from collections import namedtuple
from_to = namedtuple('from_to', 'from_ to')

METRIC_CONVERSION = {
'cubicmeter': from_to(1, 1),
'litre': from_to(0.0_0_1, 1_000),
'kilolitre': from_to(1, 1),
'gallon': from_to(0.0_0_4_5_4, 2_6_4.1_7_2),
'cubicyard': from_to(0.7_6_4_5_5, 1.3_0_7_9_5),
'cubicfoot': from_to(0.0_2_8, 3_5.3_1_4_7),
'cup': from_to(0.0_0_0_2_3_6_5_8_8, 4_2_2_6.7_5),
}
def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    """Converts a volume from one metric unit to another using the table above."""
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
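    # Example conversions using the table above:
    print(volume_conversion(4, "cubicmeter", "litre"))  # 4000.0
    print(volume_conversion(1, "litre", "gallon"))      # 0.264172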
| 147
| 1
|
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
"merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"ctrl": 2_56,
}
CONTROL_CODES = {
"Pregnancy": 16_86_29,
"Christianity": 76_75,
"Explain": 10_64_23,
"Fitness": 6_34_40,
"Saving": 6_31_63,
"Ask": 2_71_71,
"Ass": 9_59_85,
"Joke": 16_35_09,
"Questions": 4_56_22,
"Thoughts": 4_96_05,
"Retail": 5_23_42,
"Feminism": 16_43_38,
"Writing": 1_19_92,
"Atheism": 19_22_63,
"Netflix": 4_86_16,
"Computing": 3_96_39,
"Opinion": 4_32_13,
"Alone": 4_49_67,
"Funny": 5_89_17,
"Gaming": 4_03_58,
"Human": 40_88,
"India": 13_31,
"Joker": 7_71_38,
"Diet": 3_62_06,
"Legal": 1_18_59,
"Norman": 49_39,
"Tip": 7_26_89,
"Weight": 5_23_43,
"Movies": 4_62_73,
"Running": 2_34_25,
"Science": 20_90,
"Horror": 3_77_93,
"Confession": 6_05_72,
"Finance": 1_22_50,
"Politics": 1_63_60,
"Scary": 19_19_85,
"Support": 1_26_54,
"Technologies": 3_25_16,
"Teenage": 6_61_60,
"Event": 3_27_69,
"Learned": 6_74_60,
"Notion": 18_27_70,
"Wikipedia": 3_75_83,
"Books": 66_65,
"Extract": 7_60_50,
"Confessions": 10_27_01,
"Conspiracy": 7_59_32,
"Links": 6_36_74,
"Narcissus": 15_04_25,
"Relationship": 5_47_66,
"Relationships": 13_47_96,
"Reviews": 4_16_71,
"News": 42_56,
"Translation": 2_68_20,
"multilingual": 12_84_06,
}
def get_pairs(word):
    """Return the set of symbol pairs in a word.

    Word is represented as a tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
class CTRLTokenizer(PreTrainedTokenizer):
    """Construct a CTRL tokenizer, based on Byte-Pair Encoding."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        """Tokenize a string."""
        split_tokens = []

        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
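
    # Added illustration (standalone strings only, no tokenizer instance needed):
    # the "@@" continuation markers produced by `bpe` are undone by
    # `convert_tokens_to_string`, e.g.
    #     tokens = ["un@@", "believ@@", "able"]
    #     " ".join(tokens).replace("@@ ", "").strip()  # -> "unbelievable"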
| 37
|
'''simple docstring'''
def lucas_lehmer_test(p: int) -> bool:
    """Lucas-Lehmer primality test for Mersenne numbers 2**p - 1 (exact for odd prime p)."""
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0


if __name__ == "__main__":
    print(lucas_lehmer_test(7))
    print(lucas_lehmer_test(11))
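    # Added cross-check: the Mersenne prime exponents below 20 are 3, 5, 7, 13, 17, 19.
    print([p for p in range(3, 20) if lucas_lehmer_test(p)])  # -> [3, 5, 7, 13, 17, 19]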
| 37
| 1
|
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
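

# Added sketch: a concrete command implements both abstract hooks. The class
# name and the argument below are invented for illustration, not part of the file.
class HelloCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        # real commands attach a sub-parser here and register a factory callable
        parser.add_argument("--name", default="world")

    def run(self):
        print("hello")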
| 139
|
'''simple docstring'''
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
"gwf-440k": {
"url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
"sample_rate": 4_80_00,
"sample_size": 6_55_36,
},
"jmann-small-190k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
"sample_rate": 4_80_00,
"sample_size": 6_55_36,
},
"jmann-large-580k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
"sample_rate": 4_80_00,
"sample_size": 13_10_72,
},
"maestro-uncond-150k": {
"url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
"sample_rate": 1_60_00,
"sample_size": 6_55_36,
},
"unlocked-uncond-250k": {
"url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
"sample_rate": 1_60_00,
"sample_size": 6_55_36,
},
"honk-140k": {
"url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
"sample_rate": 1_60_00,
"sample_size": 6_55_36,
},
}
def alpha_sigma_to_t(alpha, sigma):
    """Returns a timestep, given the scaling factors for the clean image and for the noise."""
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
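

# Added sanity check: the crash schedule fixes the endpoints of the linear grid,
#     get_crash_schedule(torch.tensor([0.0, 1.0]))  # -> tensor([0., 1.])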
class Object(object):
    pass


class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()

        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)


def download(model_name):
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")
    return f"./{model_name}.ckpt"
DOWN_NUM_TO_LAYER = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
}
UP_NUM_TO_LAYER = {
"8": "resnets.0",
"9": "attentions.0",
"10": "resnets.1",
"11": "attentions.1",
"12": "resnets.2",
"13": "attentions.2",
}
MID_NUM_TO_LAYER = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
"8": "resnets.3",
"9": "attentions.3",
"10": "resnets.4",
"11": "attentions.4",
"12": "resnets.5",
"13": "attentions.5",
}
DEPTH_0_TO_LAYER = {
"0": "resnets.0",
"1": "resnets.1",
"2": "resnets.2",
"4": "resnets.0",
"5": "resnets.1",
"6": "resnets.2",
}
RES_CONV_MAP = {
"skip": "conv_skip",
"main.0": "conv_1",
"main.1": "group_norm_1",
"main.3": "conv_2",
"main.4": "group_norm_2",
}
ATTN_MAP = {
"norm": "group_norm",
"qkv_proj": ["query", "key", "value"],
"out_proj": ["proj_attn"],
}
def convert_resconv_naming(name):
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])

    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"ResConvBlock error with {name}")

    return name.replace(name[:6], RES_CONV_MAP[name[:6]])


def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")
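

# Added illustration of the mapping above: a norm layer renames one-to-one,
# while a fused qkv projection fans out into three diffusers names, e.g.
#     convert_attn_naming("norm.weight")     -> "group_norm.weight"
#     convert_attn_naming("qkv_proj.weight") -> ["query.weight", "key.weight", "value.weight"]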
def rename(input_string, max_depth=13):
    string = input_string

    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")

    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]

    while string.startswith("main.7."):
        depth += 1
        string = string[7:]

    if string.startswith("main."):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"

    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")

    string_left = string_left[1:]

    if "resnets" in new_layer:
        new_string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left)
        string_left = new_string_left

    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)

        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict


def transform_conv_attns(new_state_dict, new_k, v):
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
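

# Added example: a fused qkv conv weight of shape (3*C, C, 1) splits into three
# (C, C) linear weights, e.g. with C=4:
#     sd = {}
#     transform_conv_attns(sd, ["query.weight", "key.weight", "value.weight"], torch.randn(12, 4, 1))
#     {k: tuple(v.shape) for k, v in sd.items()}  # each -> (4, 4)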
def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())

    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33

    diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps)

    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=diffusers_scheduler)

    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)

    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()

    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print("Diff sum", diff_sum)
    print("Diff max", diff_max)

    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"
    print(f"Conversion for {model_name} successful!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    # NOTE: argparse's `type=bool` treats any non-empty string as True; leave the flag unset to keep the default.
    parser.add_argument(
        "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()

    main(args)
| 139
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
    "google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
    "google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
    # See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class BigBirdConfig(PretrainedConfig):
    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout


class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
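

# Added usage sketch:
#     config = BigBirdConfig()
#     config.attention_type, config.block_size, config.num_random_blocks  # -> "block_sparse", 64, 3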
| 355
|
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc):
    """
    Cleans the model documentation table of content by removing duplicates and sorting models alphabetically.
    """
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())


def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
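
# Added illustration (as comments) of what clean_model_doc_toc does to a toy list:
#     clean_model_doc_toc([
#         {"local": "model_doc/bert", "title": "BERT"},
#         {"local": "model_doc/albert", "title": "ALBERT"},
#         {"local": "model_doc/bert", "title": "BERT"},
#     ])
#     -> [{"local": "model_doc/albert", "title": "ALBERT"}, {"local": "model_doc/bert", "title": "BERT"}]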
| 179
| 0
|
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class BankersAlgorithm:
    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        """Sum the currently allocated resources, per resource column."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        """Resources still available: claim vector minus what is allocated."""
        return np.array(self.__claim_vector) - np.array(self.__processes_resource_summation())

    def __need(self) -> list[list[int]]:
        """Per-process need: maximum claim minus current allocation."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        """Map each need vector back to its original process index."""
        return {self.__need().index(need): need for need in self.__need()}

    def main(self, **kwargs) -> None:
        """Simulate the Banker's algorithm, executing processes while the state is safe."""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        """Properly align the display of the algorithm's input tables."""
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print("Current Usage by Active Processes: " + " ".join(str(x) for x in self.__claim_vector))
        print("Initial Available Resources: " + " ".join(str(x) for x in self.__available_resources()))
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
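
# Added usage example, mirroring the module-level fixtures defined above:
#     BankersAlgorithm(
#         test_claim_vector, test_allocated_res_table, test_maximum_claim_table
#     ).main(describe=True)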
| 119
|
'''simple docstring'''
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

require_version("pytorch_lightning>=1.0.4")

MODEL_MODES = {
    "base": AutoModel,
    "sequence-classification": AutoModelForSequenceClassification,
    "question-answering": AutoModelForQuestionAnswering,
    "pretraining": AutoModelForPreTraining,
    "token-classification": AutoModelForTokenClassification,
    "language-modeling": AutoModelWithLMHead,
    "summarization": AutoModelForSeq2SeqLM,
    "translation": AutoModelForSeq2SeqLM,
}


# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    # '': get_constant_schedule,             # not supported for now
    # '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class BaseTransformer(pl.LightningModule):
    def __init__(
        self,
        hparams: argparse.Namespace,
        num_labels=None,
        mode="base",
        config=None,
        tokenizer=None,
        model=None,
        **config_kwargs,
    ):
        """Initialize a model, tokenizer and config."""
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
                **({"num_labels": num_labels} if num_labels is not None else {}),
                cache_dir=cache_dir,
                **config_kwargs,
            )
        else:
            self.config: PretrainedConfig = config

        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))

        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
                cache_dir=cache_dir,
            )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path,
                from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
                config=self.config,
                cache_dir=cache_dir,
            )
        else:
            self.model = model
    def load_hf_checkpoint(self, *args, **kwargs):
        self.model = self.model_type.from_pretrained(*args, **kwargs)

    def get_lr_scheduler(self):
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps()
        )
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler
    def configure_optimizers(self):
        """Prepare optimizer and schedule (linear warmup and decay)."""
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check this named parameters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False
            )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
            )
        self.opt = optimizer

        scheduler = self.get_lr_scheduler()

        return [optimizer], [scheduler]
    def test_step(self, batch, batch_nb):
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)

    def total_steps(self) -> int:
        """The number of total training steps that will be run. Used for lr scheduler purposes."""
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs

    def setup(self, stage):
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False):
        raise NotImplementedError("You must implement this for your task")

    def train_dataloader(self):
        return self.train_loader

    def val_dataloader(self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)

    def _feature_file(self, mode):
        return os.path.join(
            self.hparams.data_dir,
            "cached_{}_{}_{}".format(
                mode,
                list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
                str(self.hparams.max_seq_length),
            ),
        )

    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        parser.add_argument(
            "--model_name_or_path",
            default=None,
            type=str,
            required=True,
            help="Path to pretrained model or model identifier from huggingface.co/models",
        )
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
        )
        parser.add_argument(
            "--tokenizer_name",
            default=None,
            type=str,
            help="Pretrained tokenizer name or path if not the same as model_name",
        )
        parser.add_argument(
            "--cache_dir",
            default=str(Path(root_dir).parent / "test_run" / "cache"),
            type=str,
            help="Where do you want to store the pre-trained models downloaded from huggingface.co",
        )
        parser.add_argument(
            "--encoder_layerdrop",
            type=float,
            help="Encoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--decoder_layerdrop",
            type=float,
            help="Decoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--dropout",
            type=float,
            help="Dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--attention_dropout",
            type=float,
            help="Attention dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler",
            default="linear",
            choices=arg_to_scheduler_choices,
            metavar=arg_to_scheduler_metavar,
            type=str,
            help="Learning rate scheduler",
        )
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")
class InitCallback(pl.Callback):
    def on_sanity_check_start(self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.


class CheckParamCallback(pl.Callback):
    # check whether the model parameters received gradients
    def on_after_backward(self, trainer, pl_module):
        # print(pl_module.model.rag)
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)


class LoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer, pl_module):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer, pl_module):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
    parser.add_argument(
        "--output_dir",
        default=str(Path(root_dir).parent / "test_run" / "model_checkpoints"),
        type=str,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O2",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps",
        dest="accumulate_grad_batches",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir",
        default=str(Path(root_dir).parent / "test_run" / "dummy-train-data"),
        type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )


def generic_train(
    model: BaseTransformer,
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}

    if args.fp16:
        train_params["precision"] = 16

    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"

    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)
    else:
        print("RAG modeling tests with new set functions successfully executed!")
    return trainer
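

# Added sketch: a task-specific module only has to override `get_dataloader`;
# the toy dataset below is a stand-in, not part of the original file.
class _ToyModule(BaseTransformer):
    def get_dataloader(self, type_path, batch_size, shuffle=False):
        import torch
        from torch.utils.data import DataLoader, TensorDataset

        data = TensorDataset(torch.zeros(8, 4, dtype=torch.long))
        return DataLoader(data, batch_size=batch_size, shuffle=shuffle)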
| 168
| 0
|
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8


def decimal_to_bits(x, bits=BITS):
    """expects image tensor ranging from 0 to 1, outputs bit tensor ranging from -1 to 1"""
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits


def bits_to_decimal(x, bits=BITS):
    """expects bits from -1 to 1, outputs image tensor from 0 to 1"""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)
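

# Added round-trip sanity check: quantizing to 8 bits and back reproduces the
# image to within 1/255.
if __name__ == "__main__":
    img = torch.rand(1, 3, 8, 8)
    print((img - bits_to_decimal(decimal_to_bits(img))).abs().max() <= 1 / 255)  # tensor(True)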
def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )

    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod

    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance ** 0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def ddpm_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    prediction_type="epsilon",
    generator=None,
    return_dict: bool = True,
) -> Union[DDPMSchedulerOutput, Tuple]:
    t = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one

    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")

    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)

    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()
        self.bit_scale = bit_scale
        # monkeypatch the scheduler's step with the bit-aware variant before registering it
        scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
        )
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 208
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
| 208
| 1
|
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1

    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (config, input_ids, input_mask, head_mask) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]
        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
| 71
|
'''simple docstring'''
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    decoded: str = ""
    keychar: int
    cipherchar: int
    decodedchar: int

    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)

    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles: list[str] = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str
    data: str = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")

    ciphertext = [int(number) for number in data.strip().split(",")]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
if __name__ == "__main__":
print(F"""{solution() = }""")
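
# Added illustration of the XOR helper with a single-letter key:
#     key = (ord("a"),)
#     try_key([ord("H") ^ key[0], ord("i") ^ key[0]], key)  # -> "Hi"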
| 251
| 0
|
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Get the last completed workflow run id of the scheduled (daily) CI."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Get the artifacts of the last completed workflow run of the scheduled (daily) CI."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Get the artifacts' content of the last completed workflow run of the scheduled (daily) CI."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
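

# Added hypothetical invocation (the artifact name, output dir and env var are
# placeholders, not from this file):
#     token = os.environ.get("GITHUB_TOKEN")
#     reports = get_last_daily_ci_reports(["ci_results"], output_dir=".", token=token)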
| 186
|
"""simple docstring"""
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    '''simple docstring'''
    total_value = int(value)
    # Initialize Result
    answer = []
    # Traverse through all denominations, largest first
    for denomination in reversed(denominations):
        # Find denominations
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append the "answers" array
    return answer
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = '0'
    if (
        input('Do you want to enter your denominations? (y/n): ').strip().lower()
        == "y"
    ):
        n = int(input('Enter the number of denominations you want to add: ').strip())
        for i in range(0, n):
            denominations.append(int(input(f'''Denomination {i}: ''').strip()))
        value = input('Enter the change you want to make in Indian Currency: ').strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input('Enter the change you want to make: ').strip()
    if int(value) <= 0:
        print('The total value cannot be zero or negative.')
    else:
        print(f'''Following is minimal change for {value}: ''')
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=' ')
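A quick worked example of the greedy routine above, skipping the interactive prompts:

>>> find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987")
[500, 100, 100, 100, 100, 50, 20, 10, 5, 2]

Note that the greedy strategy is optimal for canonical coin systems such as this one, but not for arbitrary denominations (e.g. {1, 3, 4} with a target of 6, where it picks three coins instead of two).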
| 186
| 1
|
'''simple docstring'''
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    """simple docstring"""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    """simple docstring"""
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    """simple docstring"""
    value = os.environ.get(key, str(default))
    return value
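Usage sketch; the environment variable names are illustrative:

os.environ["MY_APP_DEBUG"] = "yes"  # strtobool accepts y/yes/t/true/on/1
print(parse_flag_from_env("MY_APP_DEBUG"))  # True
print(get_int_from_env(["MY_APP_WORKERS", "WORKERS"], default=4))  # 4 when neither is set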
| 37
|
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''albert-base-v1''': 512,
'''albert-large-v1''': 512,
'''albert-xlarge-v1''': 512,
'''albert-xxlarge-v1''': 512,
'''albert-base-v2''': 512,
'''albert-large-v2''': 512,
'''albert-xlarge-v2''': 512,
'''albert-xxlarge-v2''': 512,
}
SPIECE_UNDERLINE = "▁"
class AlbertTokenizer(PreTrainedTokenizer):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(self, vocab_file, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", sp_model_kwargs=None, **kwargs) -> None:
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
@property
def UpperCAmelCase_ ( self ) -> Optional[int]:
return len(self.sp_model )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
lowerCAmelCase__ : Union[str, Any] = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Optional[int]:
if self.remove_space:
lowerCAmelCase__ : int = """ """.join(inputs.strip().split() )
else:
lowerCAmelCase__ : str = inputs
lowerCAmelCase__ : Tuple = outputs.replace("""``""" ,"""\"""" ).replace("""''""" ,"""\"""" )
if not self.keep_accents:
lowerCAmelCase__ : Any = unicodedata.normalize("""NFKD""" ,__UpperCAmelCase )
lowerCAmelCase__ : Dict = """""".join([c for c in outputs if not unicodedata.combining(__UpperCAmelCase )] )
if self.do_lower_case:
lowerCAmelCase__ : Tuple = outputs.lower()
return outputs
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> List[str]:
lowerCAmelCase__ : List[str] = self.preprocess_text(__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = self.sp_model.encode(__UpperCAmelCase ,out_type=__UpperCAmelCase )
lowerCAmelCase__ : str = []
for piece in pieces:
if len(__UpperCAmelCase ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
lowerCAmelCase__ : List[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(__UpperCAmelCase ,"""""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowerCAmelCase__ : str = cur_pieces[1:]
else:
lowerCAmelCase__ : int = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__UpperCAmelCase )
else:
new_pieces.append(__UpperCAmelCase )
return new_pieces
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> List[str]:
return self.sp_model.PieceToId(__UpperCAmelCase )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Any:
return self.sp_model.IdToPiece(__UpperCAmelCase )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> str:
lowerCAmelCase__ : str = []
lowerCAmelCase__ : Tuple = """"""
lowerCAmelCase__ : Tuple = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__UpperCAmelCase ) + token
lowerCAmelCase__ : Union[str, Any] = True
lowerCAmelCase__ : List[str] = []
else:
current_sub_tokens.append(__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = False
out_string += self.sp_model.decode(__UpperCAmelCase )
return out_string.strip()
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> List[int]:
lowerCAmelCase__ : int = [self.sep_token_id]
lowerCAmelCase__ : Dict = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ,__UpperCAmelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase ,token_ids_a=__UpperCAmelCase ,already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is not None:
return [1] + ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1]
return [1] + ([0] * len(__UpperCAmelCase )) + [1]
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> List[int]:
lowerCAmelCase__ : List[str] = [self.sep_token_id]
lowerCAmelCase__ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> Tuple[str]:
if not os.path.isdir(__UpperCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase__ : int = os.path.join(
__UpperCAmelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,__UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase ,"""wb""" ) as fi:
lowerCAmelCase__ : List[Any] = self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
return (out_vocab_file,)
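This class mirrors the upstream AlbertTokenizer, so a round-trip with a released checkpoint looks roughly like this (assuming `transformers` and `sentencepiece` are installed):

from transformers import AlbertTokenizer

tok = AlbertTokenizer.from_pretrained("albert-base-v2")
ids = tok.encode("SentencePiece splits words into subword pieces.")
print(tok.convert_ids_to_tokens(ids))
print(tok.decode(ids, skip_special_tokens=True))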
| 37
| 1
|
'''simple docstring'''
def remove_digit(num: int) -> int:
    """
    Returns the biggest number obtainable by removing exactly one digit.
    >>> remove_digit(152)
    52
    >>> remove_digit(6385)
    685
    >>> remove_digit(-11)
    1
    >>> remove_digit(2222222)
    222222
    """
    if not isinstance(num, int):
        raise TypeError('only integers accepted as input')
    num_string = str(abs(num))
    num_transpositions = [list(num_string) for char in range(len(num_string))]
    for index in range(len(num_string)):
        num_transpositions[index].pop(index)
    return max(
        int(''.join(list(transposition))) for transposition in num_transpositions)
if __name__ == "__main__":
__import__("doctest").testmod()
| 354
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class OpenAIGPTConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = '''openai-gpt'''
    attribute_map = {
        '''max_position_embeddings''': '''n_positions''',
        '''hidden_size''': '''n_embd''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }

    def __init__( self, vocab_size=40_478, n_positions=512, n_embd=768, n_layer=12, n_head=12, afn="gelu", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1E-5, initializer_range=0.02, summary_type="cls_index", summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, **kwargs, ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
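Instantiating a deliberately small variant for a smoke test (sizes chosen arbitrarily):

config = OpenAIGPTConfig(n_layer=2, n_head=2, n_embd=64, n_positions=128)
print(config.hidden_size)  # 64, resolved to n_embd through attribute_map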
| 246
| 0
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase ( self : List[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowerCamelCase ( self : List[Any] ):
snake_case__ : Any = 1
snake_case__ : Optional[Any] = 3
snake_case__ : Optional[Any] = (32, 32)
snake_case__ : Tuple = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__lowerCamelCase )
return image
@property
def lowerCamelCase ( self : Optional[int] ):
torch.manual_seed(0 )
snake_case__ : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
return model
@property
def lowerCamelCase ( self : Union[str, Any] ):
torch.manual_seed(0 )
snake_case__ : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def lowerCamelCase ( self : Any ):
torch.manual_seed(0 )
snake_case__ : Optional[Any] = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_006 , )
return RobertaSeriesModelWithTransformation(__lowerCamelCase )
@property
def lowerCamelCase ( self : str ):
def extract(*snake_case_ : Any , **snake_case_ : str ):
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : Tuple ):
snake_case__ : Any = torch.ones([0] )
def lowerCamelCase ( self : List[Any] , snake_case_ : Optional[Any] ):
self.pixel_values.to(__lowerCamelCase )
return self
return Out()
return extract
def lowerCamelCase ( self : int ):
snake_case__ : Any = '''cpu''' # ensure determinism for the device-dependent torch.Generator
snake_case__ : Optional[int] = self.dummy_cond_unet
snake_case__ : int = PNDMScheduler(skip_prk_steps=__lowerCamelCase )
snake_case__ : Union[str, Any] = self.dummy_vae
snake_case__ : Optional[Any] = self.dummy_text_encoder
snake_case__ : int = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
snake_case__ : Dict = 77
snake_case__ : int = self.dummy_image.to(__lowerCamelCase )
snake_case__ : List[str] = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
snake_case__ : Optional[Any] = AltDiffusionImgaImgPipeline(
unet=__lowerCamelCase , scheduler=__lowerCamelCase , vae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , safety_checker=__lowerCamelCase , feature_extractor=self.dummy_extractor , )
snake_case__ : str = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__lowerCamelCase )
snake_case__ : List[str] = alt_pipe.to(__lowerCamelCase )
alt_pipe.set_progress_bar_config(disable=__lowerCamelCase )
snake_case__ : Any = '''A painting of a squirrel eating a burger'''
snake_case__ : Tuple = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
snake_case__ : List[str] = alt_pipe(
[prompt] , generator=__lowerCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=__lowerCamelCase , )
snake_case__ : List[Any] = output.images
snake_case__ : Union[str, Any] = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
snake_case__ : Optional[Any] = alt_pipe(
[prompt] , generator=__lowerCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=__lowerCamelCase , return_dict=__lowerCamelCase , )[0]
snake_case__ : Optional[Any] = image[0, -3:, -3:, -1]
snake_case__ : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
snake_case__ : List[Any] = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def lowerCamelCase ( self : Any ):
snake_case__ : Any = self.dummy_cond_unet
snake_case__ : Dict = PNDMScheduler(skip_prk_steps=__lowerCamelCase )
snake_case__ : List[Any] = self.dummy_vae
snake_case__ : Optional[int] = self.dummy_text_encoder
snake_case__ : Tuple = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
snake_case__ : int = 77
snake_case__ : List[Any] = self.dummy_image.to(__lowerCamelCase )
# put models in fp16
snake_case__ : Dict = unet.half()
snake_case__ : Tuple = vae.half()
snake_case__ : int = bert.half()
# make sure here that pndm scheduler skips prk
snake_case__ : Dict = AltDiffusionImgaImgPipeline(
unet=__lowerCamelCase , scheduler=__lowerCamelCase , vae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , safety_checker=__lowerCamelCase , feature_extractor=self.dummy_extractor , )
snake_case__ : int = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__lowerCamelCase )
snake_case__ : str = alt_pipe.to(__lowerCamelCase )
alt_pipe.set_progress_bar_config(disable=__lowerCamelCase )
snake_case__ : List[Any] = '''A painting of a squirrel eating a burger'''
snake_case__ : str = torch.manual_seed(0 )
snake_case__ : int = alt_pipe(
[prompt] , generator=__lowerCamelCase , num_inference_steps=2 , output_type="""np""" , image=__lowerCamelCase , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def lowerCamelCase ( self : Tuple ):
snake_case__ : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
# resize to resolution that is divisible by 8 but not 16 or 32
snake_case__ : str = init_image.resize((760, 504) )
snake_case__ : List[Any] = '''BAAI/AltDiffusion'''
snake_case__ : List[str] = AltDiffusionImgaImgPipeline.from_pretrained(
__lowerCamelCase , safety_checker=__lowerCamelCase , )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
pipe.enable_attention_slicing()
snake_case__ : List[str] = '''A fantasy landscape, trending on artstation'''
snake_case__ : List[Any] = torch.manual_seed(0 )
snake_case__ : Tuple = pipe(
prompt=__lowerCamelCase , image=__lowerCamelCase , strength=0.75 , guidance_scale=7.5 , generator=__lowerCamelCase , output_type="""np""" , )
snake_case__ : str = output.images[0]
snake_case__ : Optional[int] = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
snake_case__ : Optional[Any] = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase ( self : str ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase ( self : List[str] ):
snake_case__ : str = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
snake_case__ : Optional[int] = init_image.resize((768, 512) )
snake_case__ : Optional[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy""" )
snake_case__ : Optional[int] = '''BAAI/AltDiffusion'''
snake_case__ : Dict = AltDiffusionImgaImgPipeline.from_pretrained(
__lowerCamelCase , safety_checker=__lowerCamelCase , )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
pipe.enable_attention_slicing()
snake_case__ : List[Any] = '''A fantasy landscape, trending on artstation'''
snake_case__ : int = torch.manual_seed(0 )
snake_case__ : Dict = pipe(
prompt=__lowerCamelCase , image=__lowerCamelCase , strength=0.75 , guidance_scale=7.5 , generator=__lowerCamelCase , output_type="""np""" , )
snake_case__ : int = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2
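For reference, the pipeline these tests exercise can also be driven directly; a minimal sketch reusing the checkpoint, image, and prompt from the tests above:

from diffusers import AltDiffusionImg2ImgPipeline
from diffusers.utils import load_image

pipe = AltDiffusionImg2ImgPipeline.from_pretrained("BAAI/AltDiffusion").to("cuda")
init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/img2img/sketch-mountains-input.jpg"
).resize((768, 512))
image = pipe(
    prompt="A fantasy landscape, trending on artstation",
    image=init_image,
    strength=0.75,
    guidance_scale=7.5,
).images[0]
image.save("fantasy_landscape.png")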
| 35
|
"""simple docstring"""
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
a_ = logging.get_logger(__name__)
class __snake_case :
"""simple docstring"""
def __init__( self , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase=None , __lowerCamelCase=None ):
'''simple docstring'''
if not conversation_id:
            __A : List[Any] = uuid.uuid4()
if past_user_inputs is None:
__A : List[str] = []
if generated_responses is None:
__A : Tuple = []
__A : uuid.UUID = conversation_id
__A : List[str] = past_user_inputs
__A : List[str] = generated_responses
__A : Optional[str] = text
def __eq__( self , __lowerCamelCase ):
'''simple docstring'''
if not isinstance(__lowerCamelCase , __lowerCamelCase ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase = False ):
'''simple docstring'''
if self.new_user_input:
if overwrite:
logger.warning(
F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """
F"""with: \"{text}\".""" )
__A : str = text
else:
logger.warning(
F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """
F"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" )
else:
__A : Union[str, Any] = text
def UpperCamelCase__( self ):
'''simple docstring'''
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
__A : List[Any] = None
def UpperCamelCase__( self , __lowerCamelCase ):
'''simple docstring'''
self.generated_responses.append(__lowerCamelCase )
def UpperCamelCase__( self ):
'''simple docstring'''
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self ):
'''simple docstring'''
__A : Optional[Any] = F"""Conversation id: {self.uuid} \n"""
for is_user, text in self.iter_texts():
__A : Tuple = '''user''' if is_user else '''bot'''
output += F"""{name} >> {text} \n"""
return output
@add_end_docstrings(
SCREAMING_SNAKE_CASE__ , R"""
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
""" , )
class __snake_case ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , *__lowerCamelCase , **__lowerCamelCase ):
'''simple docstring'''
super().__init__(*__lowerCamelCase , **__lowerCamelCase )
if self.tokenizer.pad_token_id is None:
__A : Union[str, Any] = self.tokenizer.eos_token
def UpperCamelCase__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , **__lowerCamelCase ):
'''simple docstring'''
__A : str = {}
__A : List[str] = {}
__A : Any = {}
if min_length_for_response is not None:
__A : int = min_length_for_response
if minimum_tokens is not None:
__A : Any = minimum_tokens
if "max_length" in generate_kwargs:
__A : List[Any] = generate_kwargs['''max_length''']
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
__A : str = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(__lowerCamelCase )
return preprocess_params, forward_params, postprocess_params
def __call__( self , __lowerCamelCase , __lowerCamelCase=0 , **__lowerCamelCase ):
'''simple docstring'''
__A : Any = super().__call__(__lowerCamelCase , num_workers=__lowerCamelCase , **__lowerCamelCase )
if isinstance(__lowerCamelCase , __lowerCamelCase ) and len(__lowerCamelCase ) == 1:
return outputs[0]
return outputs
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase=32 ):
'''simple docstring'''
if not isinstance(__lowerCamelCase , __lowerCamelCase ):
            raise ValueError('''ConversationalPipeline expects Conversation as inputs''' )
if conversation.new_user_input is None:
            raise ValueError(
                F"""Conversation with UUID {conversation.uuid} does not contain new user input to process. """
                '''Add user inputs with the conversation\'s `add_user_input` method''' )
if hasattr(self.tokenizer , '''_build_conversation_input_ids''' ):
__A : List[Any] = self.tokenizer._build_conversation_input_ids(__lowerCamelCase )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
__A : int = self._legacy_parse_and_tokenize(__lowerCamelCase )
if self.framework == "pt":
__A : Union[str, Any] = torch.LongTensor([input_ids] )
elif self.framework == "tf":
__A : int = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase=10 , **__lowerCamelCase ):
'''simple docstring'''
__A : Tuple = generate_kwargs.get('''max_length''' , self.model.config.max_length )
__A : str = model_inputs['''input_ids'''].shape[1]
if max_length - minimum_tokens < n:
logger.warning(F"""Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})""" )
__A : str = max_length - minimum_tokens
__A : Any = model_inputs['''input_ids'''][:, -trim:]
if "attention_mask" in model_inputs:
__A : Union[str, Any] = model_inputs['''attention_mask'''][:, -trim:]
__A : Dict = model_inputs.pop('''conversation''' )
__A : List[str] = max_length
__A : Dict = self.model.generate(**__lowerCamelCase , **__lowerCamelCase )
if self.model.config.is_encoder_decoder:
__A : Any = 1
else:
__A : List[Any] = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase=True ):
'''simple docstring'''
__A : int = model_outputs['''output_ids''']
__A : Optional[int] = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=__lowerCamelCase , clean_up_tokenization_spaces=__lowerCamelCase , )
__A : Dict = model_outputs['''conversation''']
conversation.mark_processed()
conversation.append_response(__lowerCamelCase )
return conversation
def UpperCamelCase__( self , __lowerCamelCase ):
'''simple docstring'''
__A : Tuple = self.tokenizer.eos_token_id
__A : List[str] = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
if len(__lowerCamelCase ) > self.tokenizer.model_max_length:
__A : List[str] = input_ids[-self.tokenizer.model_max_length :]
return input_ids
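Typical usage via the high-level pipeline factory; the model is left to the task default here:

from transformers import Conversation, pipeline

chatbot = pipeline("conversational")
conversation = Conversation("What is a good way to learn Rust?")
conversation = chatbot(conversation)
print(conversation.generated_responses[-1])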
| 179
| 0
|
'''simple docstring'''
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    """simple docstring"""

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
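The re-exported pipeline performs semantic (SEGA-style) edits at inference time; a sketch of a typical call, with an illustrative checkpoint and edit:

from diffusers import SemanticStableDiffusionPipeline

pipe = SemanticStableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
image = pipe(
    prompt="a photo of a cat",
    editing_prompt=["smiling"],         # concept to steer toward
    reverse_editing_direction=[False],  # add the concept rather than remove it
).images[0]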
| 164
|
'''simple docstring'''
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    """simple docstring"""

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 164
| 1
|
'''simple docstring'''
from __future__ import annotations
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right


class Node:
    """simple docstring"""

    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, g_cost: float, parent: Node | None) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        # Manhattan distance to the goal
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    """simple docstring"""

    def __init__(self, start: tuple[int, int], goal: tuple[int, int]) -> None:
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 9_9999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    print('------')
    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2
for elem in grid:
print(elem)
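Greedy best-first differs from A* only in how the frontier is ranked: here f_cost is the heuristic alone, while A* ranks by g_cost plus the heuristic. A one-change sketch of the A* variant built on the Node class above:

class AStarNode(Node):
    """Same node, but ranked by g + h instead of h alone."""

    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        super().__init__(pos_x, pos_y, goal_x, goal_y, g_cost, parent)
        self.f_cost = self.g_cost + self.calculate_heuristic()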
| 208
|
'''simple docstring'''
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
) -> None:
    if config_name_or_path is None:
        config_name_or_path = 'facebook/rag-token-base' if model_type == 'rag_token' else 'facebook/rag-sequence-base'
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path
    model_class = RagTokenForGeneration if model_type == 'rag_token' else RagSequenceForGeneration
    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config)
    rag_model.save_pretrained(dest_dir)
    # Sanity check.
    model_class.from_pretrained(dest_dir)
    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / 'generator_tokenizer/')
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / 'question_encoder_tokenizer/')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_type',
choices=['rag_sequence', 'rag_token'],
required=True,
type=str,
help='RAG model type: rag_sequence, rag_token',
)
parser.add_argument('--dest', type=str, required=True, help='Path to the output checkpoint directory.')
parser.add_argument('--generator_name_or_path', type=str, required=True, help='Generator model identifier')
parser.add_argument(
'--question_encoder_name_or_path', type=str, required=True, help='Question encoder model identifier'
)
parser.add_argument(
'--generator_tokenizer_name_or_path',
type=str,
help='Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``',
)
parser.add_argument(
'--question_encoder_tokenizer_name_or_path',
type=str,
help='Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``',
)
parser.add_argument(
'--config_name_or_path',
type=str,
help=(
'Identifier of the model config to use, if not provided, resolves to a base config for a given'
' ``model_type``'
),
)
    args = parser.parse_args()
    dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
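Called programmatically rather than through argparse, one plausible invocation looks like this; the two Hub identifiers are real checkpoints, but any compatible generator/question-encoder pair works:

from pathlib import Path

consolidate(
    "rag_sequence",
    "facebook/bart-large-cnn",
    "facebook/dpr-question_encoder-single-nq-base",
    Path("./rag-sequence-consolidated"),
)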
| 208
| 1
|
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
__UpperCAmelCase = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class DummyModel(nn.Module):
    def __init__(self) -> None:
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self ) -> List[str]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
UpperCamelCase : Union[str, Any] = DummyModel()
UpperCamelCase : Tuple = torch.optim.Adam(params=model.parameters(), lr=1e-3 )
UpperCamelCase , UpperCamelCase : Optional[int] = dummy_dataloaders()
UpperCamelCase : Union[str, Any] = ProjectConfiguration(total_limit=1, project_dir=SCREAMING_SNAKE_CASE_, automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
UpperCamelCase : Dict = Accelerator(project_config=SCREAMING_SNAKE_CASE_ )
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : str = accelerator.prepare(
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ), 1 )
def snake_case_ ( self ) -> List[str]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
UpperCamelCase : Union[str, Any] = DummyModel()
UpperCamelCase : List[str] = torch.optim.Adam(params=model.parameters(), lr=1e-3 )
UpperCamelCase , UpperCamelCase : Dict = dummy_dataloaders()
# Train baseline
UpperCamelCase : List[str] = Accelerator()
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Tuple = accelerator.prepare(
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# Save initial
UpperCamelCase : Dict = os.path.join(SCREAMING_SNAKE_CASE_, 'initial' )
accelerator.save_state(SCREAMING_SNAKE_CASE_ )
((UpperCamelCase) , (UpperCamelCase)) : Dict = model.a.item(), model.b.item()
UpperCamelCase : int = optimizer.state_dict()
UpperCamelCase : Optional[int] = train(3, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
((UpperCamelCase) , (UpperCamelCase)) : Tuple = model.a.item(), model.b.item()
UpperCamelCase : Optional[int] = optimizer.state_dict()
# Train partially
set_seed(42 )
UpperCamelCase : str = DummyModel()
UpperCamelCase : Dict = torch.optim.Adam(params=model.parameters(), lr=1e-3 )
UpperCamelCase , UpperCamelCase : List[Any] = dummy_dataloaders()
UpperCamelCase : str = Accelerator()
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Any = accelerator.prepare(
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
accelerator.load_state(SCREAMING_SNAKE_CASE_ )
((UpperCamelCase) , (UpperCamelCase)) : Tuple = model.a.item(), model.b.item()
UpperCamelCase : Any = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = train(2, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# Save everything
UpperCamelCase : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE_, 'checkpoint' )
accelerator.save_state(SCREAMING_SNAKE_CASE_ )
# Load everything back in and make sure all states work
accelerator.load_state(SCREAMING_SNAKE_CASE_ )
test_rands += train(1, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
((UpperCamelCase) , (UpperCamelCase)) : int = model.a.item(), model.b.item()
UpperCamelCase : int = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> str:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
UpperCamelCase : List[Any] = DummyModel()
UpperCamelCase : Optional[Any] = torch.optim.Adam(params=model.parameters(), lr=1e-3 )
UpperCamelCase , UpperCamelCase : str = dummy_dataloaders()
UpperCamelCase : Any = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
UpperCamelCase : Union[str, Any] = Accelerator(project_dir=SCREAMING_SNAKE_CASE_, project_config=SCREAMING_SNAKE_CASE_ )
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Tuple = accelerator.prepare(
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
((UpperCamelCase) , (UpperCamelCase)) : Optional[int] = model.a.item(), model.b.item()
UpperCamelCase : List[Any] = optimizer.state_dict()
UpperCamelCase : List[str] = train(3, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
((UpperCamelCase) , (UpperCamelCase)) : Dict = model.a.item(), model.b.item()
UpperCamelCase : str = optimizer.state_dict()
# Train partially
set_seed(42 )
UpperCamelCase : List[Any] = DummyModel()
UpperCamelCase : Optional[int] = torch.optim.Adam(params=model.parameters(), lr=1e-3 )
UpperCamelCase , UpperCamelCase : List[Any] = dummy_dataloaders()
UpperCamelCase : Union[str, Any] = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = Accelerator(project_dir=SCREAMING_SNAKE_CASE_, project_config=SCREAMING_SNAKE_CASE_ )
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : str = accelerator.prepare(
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_, 'checkpoints', 'checkpoint_0' ) )
((UpperCamelCase) , (UpperCamelCase)) : List[str] = model.a.item(), model.b.item()
UpperCamelCase : Optional[Any] = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = train(2, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_, 'checkpoints', 'checkpoint_1' ) )
test_rands += train(1, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
((UpperCamelCase) , (UpperCamelCase)) : int = model.a.item(), model.b.item()
UpperCamelCase : str = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : List[str] = torch.tensor([1, 2, 3] )
UpperCamelCase : Any = torch.tensor([2, 3, 4] )
UpperCamelCase : Dict = DummyModel()
UpperCamelCase : Tuple = torch.optim.Adam(net.parameters() )
UpperCamelCase : Optional[Any] = Accelerator()
with self.assertRaises(SCREAMING_SNAKE_CASE_ ) as ve:
accelerator.register_for_checkpointing(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = str(ve.exception )
self.assertTrue('Item at index 0' in message )
self.assertTrue('Item at index 1' in message )
self.assertFalse('Item at index 2' in message )
self.assertFalse('Item at index 3' in message )
def snake_case_ ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
UpperCamelCase : List[Any] = DummyModel()
UpperCamelCase : Tuple = torch.optim.Adam(params=model.parameters(), lr=1e-3 )
UpperCamelCase : List[str] = torch.optim.lr_scheduler.StepLR(SCREAMING_SNAKE_CASE_, step_size=1, gamma=0.99 )
UpperCamelCase , UpperCamelCase : Dict = dummy_dataloaders()
UpperCamelCase : str = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
UpperCamelCase : int = Accelerator(project_dir=SCREAMING_SNAKE_CASE_, project_config=SCREAMING_SNAKE_CASE_ )
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Any = accelerator.prepare(
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
UpperCamelCase : str = scheduler.state_dict()
train(3, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
self.assertNotEqual(SCREAMING_SNAKE_CASE_, scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_, 'checkpoints', 'checkpoint_0' ) )
self.assertEqual(SCREAMING_SNAKE_CASE_, scheduler.state_dict() )
def snake_case_ ( self ) -> Tuple:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
UpperCamelCase : List[str] = DummyModel()
UpperCamelCase : int = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_, total_limit=2 )
# Train baseline
UpperCamelCase : Tuple = Accelerator(project_dir=SCREAMING_SNAKE_CASE_, project_config=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = accelerator.prepare(SCREAMING_SNAKE_CASE_ )
# Save 3 states:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_, 'checkpoints', 'checkpoint_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_, 'checkpoints', 'checkpoint_9' ) ) )
self.assertTrue(os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_, 'checkpoints', 'checkpoint_10' ) ) )
@require_cuda
def snake_case_ ( self ) -> Tuple:
UpperCamelCase : Tuple = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
execute_subprocess_async(SCREAMING_SNAKE_CASE_, env=os.environ.copy() )
if __name__ == "__main__":
__UpperCAmelCase = '''/tmp/accelerate/state_checkpointing'''
__UpperCAmelCase = DummyModel()
__UpperCAmelCase = torch.optim.Adam(params=model.parameters(), lr=1e-3)
__UpperCAmelCase = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
__UpperCAmelCase , __UpperCAmelCase = dummy_dataloaders()
__UpperCAmelCase = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
__UpperCAmelCase = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='''no''')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
__UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
__UpperCAmelCase = group['''params'''][0].device
break
assert param_device.type == accelerator.device.type
__UpperCAmelCase = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''cpu''')
for group in optimizer.param_groups:
__UpperCAmelCase = group['''params'''][0].device
break
assert (
param_device.type == torch.device('''cpu''').type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''on_device''')
for group in optimizer.param_groups:
__UpperCAmelCase = group['''params'''][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='''Unsupported optimizer map location passed'''):
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''invalid''')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
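The pattern these tests exercise reduces to a short save/restore loop; a minimal sketch with Accelerate's public API, reusing the DummyModel defined above (the checkpoint directory is illustrative):

from accelerate import Accelerator
from accelerate.utils import ProjectConfiguration

project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
accelerator = Accelerator(project_dir="ckpts", project_config=project_config)
model = DummyModel()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
model, optimizer = accelerator.prepare(model, optimizer)
accelerator.save_state()  # writes ckpts/checkpoints/checkpoint_0
accelerator.load_state("ckpts/checkpoints/checkpoint_0")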
| 103
|
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal_successful_result() -> None:
    num_nodes = 9
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    result = kruskal(num_nodes, edges)
    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
    assert sorted(expected) == sorted(result)
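As a smaller sanity check, the same helper on a three-node triangle keeps the two lightest edges (assuming the (num_nodes, edges) signature used above):

print(kruskal(3, [[0, 1, 1], [1, 2, 2], [0, 2, 3]]))  # expected: [[0, 1, 1], [1, 2, 2]]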
| 103
| 1
|
import argparse
import os
import re
import packaging.version
UpperCamelCase = """examples/"""
REPLACE_PATTERNS = {
"""examples""": (re.compile(r"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(r"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(r"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), r"""\1version=\"VERSION\","""),
"""doc""": (re.compile(r"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
REPLACE_FILES = {
"""init""": """src/diffusers/__init__.py""",
"""setup""": """setup.py""",
}
UpperCamelCase = """README.md"""
def update_version_in_file(fname, version, pattern):
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/diffusers/main/model_doc",
                "https://huggingface.co/docs/diffusers/model_doc",
            )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)


def post_release_work():
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work()
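To see what a single substitution does, the `init` pattern can be applied standalone to a sample line:

import re

pattern = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
print(pattern.sub('__version__ = "0.20.0"\n', '__version__ = "0.19.0.dev0"'))
# __version__ = "0.20.0"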
| 186
|
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features):
    batch_size = np.inf

    def set_batch_size(feature) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)
    return None if batch_size is np.inf else batch_size
class ParquetDatasetReader(AbstractDatasetReader):
    def __init__(self, path_or_paths, split=None, features=None, cache_dir=None, keep_in_memory=False, streaming=False, num_proc=None, **kwargs):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir,
            keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        # Normalize to a dict of {split: paths} and build the underlying Parquet builder.
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(cache_dir=cache_dir, data_files=path_or_paths, features=features, hash=hash, **kwargs)
    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode,
                verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
class ParquetDatasetWriter:
    def __init__(self, dataset, path_or_buf, batch_size=None, **parquet_writer_kwargs):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        # Default to a feature-aware batch size (smaller row groups for media/binary columns).
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs
    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written
    def _write(self, file_obj, batch_size, **parquet_writer_kwargs) -> int:
        """Write the dataset to `file_obj` as Parquet and return the number of bytes written.
        The caller is responsible for opening and closing the file handle."""
        written = 0
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema
        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)
        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size),
            unit="ba",
            disable=not logging.is_progress_bar_enabled(),
            desc="Creating parquet from Arrow format",
        ):
            batch = query_table(
                table=self.dataset._data,
                key=slice(offset, offset + batch_size),
                indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
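# Usage sketch: these classes back the public `Dataset.to_parquet` /
# `Dataset.from_parquet` helpers in the `datasets` library. The file name
# below is illustrative.
if __name__ == "__main__":
    from datasets import Dataset

    ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
    num_bytes = ds.to_parquet("data.parquet")        # ParquetDatasetWriter under the hood
    reloaded = Dataset.from_parquet("data.parquet")  # ParquetDatasetReader under the hood
    print(num_bytes, reloaded)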
| 186
| 1
|
import string
from math import log10
def term_frequency(term: str, document: str) -> int:
    """Return the number of times `term` appears in `document`."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])
def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (number of documents containing `term`, total number of documents).
    Documents in `corpus` are separated by newlines."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))
def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    """Return idf = log10(n / df), or the smoothed 1 + log10(n / (1 + df))."""
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)
def tf_idf(tf: float, idf: float) -> float:
    return round(tf * idf, 3)
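# Worked example for the helpers above (the corpus is illustrative):
if __name__ == "__main__":
    document = "The cat sat on the mat. The cat slept."
    corpus = "The cat sat on the mat.\nA dog barked.\nThe cat slept."
    tf = term_frequency("cat", document)       # 2
    df, n = document_frequency("cat", corpus)  # (2, 3)
    idf = inverse_document_frequency(df, n)    # round(log10(3 / 2), 3) == 0.176
    print(tf_idf(tf, idf))                     # round(2 * 0.176, 3) == 0.352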
| 196
|
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    """Update the version in all the examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    # If the introduction or the conclusion of the list change, the prompts may need to be updated.
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version():
    """Read the current version in the main __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work():
    # First let's get the current version.
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
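# Typical invocations, assuming the script is saved as utils/release.py in the
# repository root (the path is illustrative):
#
#   python utils/release.py                 # prepare a minor release (e.g. 4.31.0)
#   python utils/release.py --patch         # prepare a patch release (e.g. 4.30.1)
#   python utils/release.py --post_release  # move back to dev (e.g. 4.32.0.dev0)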
| 196
| 1
|