"""Deprecation shim for the legacy `image_to_image.py` script."""
import warnings

from diffusers import StableDiffusionImg2ImgPipeline  # noqa: F401


warnings.warn(
    "The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionImg2ImgPipeline` instead."
)
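
# Migration sketch (added note): the supported entry point is the pipeline
# class itself, e.g.
#
#     from diffusers import StableDiffusionImg2ImgPipeline
#     pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#
# where "runwayml/stable-diffusion-v1-5" is only an illustrative checkpoint.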
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
    "distilbert-base-uncased-distilled-squad": (
        "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
    "distilbert-base-cased-distilled-squad": (
        "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
    "distilbert-base-multilingual-cased": (
        "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
    ),
    "distilbert-base-uncased-finetuned-sst-2-english": (
        "https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
    ),
}


class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
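
# Usage sketch (added for illustration, not part of the original module): the
# `attribute_map` above aliases the canonical config names onto DistilBERT's
# own, so both spellings read the same value:
#
#     config = DistilBertConfig(dim=768, n_heads=12, n_layers=6)
#     assert config.hidden_size == config.dim == 768
#     assert config.num_hidden_layers == config.n_layers == 6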
import argparse

from torch import nn

# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
    ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
    XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)

from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging


logger = logging.get_logger(__name__)
logging.set_verbosity_info()


def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Copy the old checkpoint's weights into the new model structure."""
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                # the old attention projections are stored as a single fused in_proj;
                # slice it into thirds for query/key/value
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFCamembertModel


@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"

        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)

        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations

from pprint import pformat
from typing import Generic, TypeVar

T = TypeVar("T")


class GraphAdjacencyList(Generic[T]):
    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are present in the
            # adjacency list, add destination vertex to source vertex's list of
            # adjacent vertices and add source vertex to destination vertex's
            # list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only the source vertex is present, add the destination vertex
            # to its list, then create the destination vertex with a list
            # containing the source vertex as its first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only the destination vertex is present, add the source vertex
            # to its list, then create the source vertex with a list containing
            # the destination vertex as its first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if neither vertex is present, create both, each with the other as
            # its first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both vertices are present, add the destination vertex to the
            # source vertex's list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only the source vertex is present, add the destination vertex
            # to its list and create the destination vertex with no adjacent
            # vertices.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only the destination vertex is present, create the source
            # vertex with a list containing the destination vertex as its first
            # adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if neither vertex is present, create the source vertex with the
            # destination vertex as its first adjacent vertex, then create the
            # destination vertex with no adjacent vertices.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []

        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)
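
# Illustrative usage (added, not part of the original module): `add_edge`
# returns `self`, so edges can be chained.
if __name__ == "__main__":
    directed = GraphAdjacencyList[int]()
    directed.add_edge(0, 1).add_edge(0, 2).add_edge(1, 3)
    print(directed)  # {0: [1, 2], 1: [3], 2: [], 3: []}

    undirected = GraphAdjacencyList[str](directed=False)
    undirected.add_edge("a", "b")
    print(undirected)  # {'a': ['b'], 'b': ['a']}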
import tempfile

import torch

from diffusers import IPNDMScheduler

from .test_schedulers import SchedulerCommonTest


class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 2540529) < 10
from __future__ import annotations

from typing import Any


def evaluate_postfix(postfix_notation: list) -> int:
    """
    Evaluate a postfix (reverse Polish) expression.

    >>> evaluate_postfix(["2", "1", "+", "3", "*"])
    9
    >>> evaluate_postfix(["4", "13", "5", "/", "+"])
    6
    >>> evaluate_postfix([])
    0
    """
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # integer division truncated toward zero, not floored
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
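
# Worked example (added): the division branch truncates toward zero for mixed
# signs, so evaluate_postfix(["-5", "2", "/"]) gives -5 // 2 + 1 == -2 rather
# than Python's floor-division result of -3.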
from math import pi, sqrt


def gamma(num: float) -> float:
    """
    Recursively calculate the Gamma function for integer or half-integer `num`,
    using gamma(n) = (n - 1) * gamma(n - 1) with gamma(1) = 1 and
    gamma(0.5) = sqrt(pi).
    """
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
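
# Added note: for half-integers the recursion bottoms out at
# gamma(0.5) == sqrt(pi), so e.g. gamma(3.5) == 2.5 * 1.5 * 0.5 * sqrt(pi),
# which is about 3.3234.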
from typing import Any

import numpy as np


def is_hermitian(matrix: np.ndarray) -> bool:
    """Check whether a matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Return the Rayleigh quotient (v* a v) / (v* v) of matrix a and vector v."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
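
# Added note: for a Hermitian matrix M and nonzero vector v, the Rayleigh
# quotient R(M, v) = (v* M v) / (v* v) is always real and lies between the
# smallest and largest eigenvalues of M. A quick sanity check with the
# identity matrix, where every quotient is 1:
#
#     print(rayleigh_quotient(np.eye(2), np.array([[3], [4]])))  # [[1.]]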
import unittest

from transformers import (
    MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
    pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available

from .test_pipelines_common import ANY


if is_torch_available():
    import torch


@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)

    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
import json
import os
from typing import Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    """Character-level tokenizer for MGP-STR."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Tokenize a string into a list of single characters."""
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
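
# Behaviour sketch (added note, not part of the original file): the tokenizer
# is purely character level, so given a vocab containing the characters of
# "abc", tokenizer._tokenize("abc") yields ["a", "b", "c"] before id lookup.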
from statistics import mean, stdev


def normalization(data: list, ndigits: int = 3) -> list:
    """Return a min-max normalized copy of `data`, rescaled onto [0, 1]."""
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    """Return a standardized copy of `data` with zero mean and unit sample stdev."""
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / (sigma), ndigits) for x in data]
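
# Illustrative usage (added): min-max rescaling maps the sample onto [0, 1],
# while standardization recentres it to zero mean and unit sample stdev.
if __name__ == "__main__":
    data = [10, 20, 30, 40, 50]
    print(normalization(data))  # [0.0, 0.25, 0.5, 0.75, 1.0]
    print(standardization(data))  # [-1.265, -0.632, 0.0, 0.632, 1.265]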
"""simple docstring"""
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [0] * len(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = 0
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(lowerCAmelCase_ ) ):
if indegree[i] == 0:
queue.append(lowerCAmelCase_ )
while queue:
__SCREAMING_SNAKE_CASE = queue.pop(0 )
cnt += 1
topo.append(lowerCAmelCase_ )
for x in graph[vertex]:
indegree[x] -= 1
if indegree[x] == 0:
queue.append(lowerCAmelCase_ )
if cnt != len(lowerCAmelCase_ ):
print("Cycle exists" )
else:
print(lowerCAmelCase_ )
# Adjacency List of Graph
a__ : Any = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
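
# Added note: with this FIFO queue the run above deterministically prints
# [0, 1, 2, 3, 4, 5], one valid topological ordering of the graph.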
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow

from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class StableDiffusion2InpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class StableDiffusion2InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        scheduler = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            safety_checker=None,
            scheduler=scheduler,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
from __future__ import annotations

from collections.abc import Generator

import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name


if __name__ == "__main__":
    for i, job in enumerate(fetch_jobs("Bangalore"), 1):
        print(f"Job {i:>2} is {job[0]} at {job[1]}")
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey


class NearestNeighbour:
    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = self.output_img = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        """Map a destination column back to the nearest source column."""
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        """Map a destination row back to the nearest source row."""
        return int(self.ratio_y * y)


if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()

    imshow(
        f"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
    )
    waitKey(0)
    destroyAllWindows()
from __future__ import annotations

import math


def prime_sieve(num: int) -> list[int]:
    """
    Return a list of the primes up to and including `num`, computed with the
    sieve of Eratosthenes.
    """
    if num <= 0:
        message = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(message)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime


if __name__ == "__main__":
    print(prime_sieve(int(input("Enter a positive integer: ").strip())))
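
# For example (added note): prime_sieve(25) returns
# [2, 3, 5, 7, 11, 13, 17, 19, 23].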
def get_set_bits_count(number: int) -> int:
    """Count set bits using Brian Kernighan's method."""
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")

    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
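
# Worked trace (added): `number &= number - 1` clears the lowest set bit, so
# the loop runs once per set bit. For 13 (0b1101):
#     0b1101 & 0b1100 -> 0b1100
#     0b1100 & 0b1011 -> 0b1000
#     0b1000 & 0b0111 -> 0b0000
# hence get_set_bits_count(13) == 3.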
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
lowerCAmelCase_ = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_speech_encoder_decoder'] = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_flax_speech_encoder_decoder'] = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_ = {
'configuration_roberta': ['ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RobertaConfig', 'RobertaOnnxConfig'],
'tokenization_roberta': ['RobertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_roberta_fast'] = ['RobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_roberta'] = [
'ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaForCausalLM',
'RobertaForMaskedLM',
'RobertaForMultipleChoice',
'RobertaForQuestionAnswering',
'RobertaForSequenceClassification',
'RobertaForTokenClassification',
'RobertaModel',
'RobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_tf_roberta'] = [
'TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaForCausalLM',
'TFRobertaForMaskedLM',
'TFRobertaForMultipleChoice',
'TFRobertaForQuestionAnswering',
'TFRobertaForSequenceClassification',
'TFRobertaForTokenClassification',
'TFRobertaMainLayer',
'TFRobertaModel',
'TFRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_flax_roberta'] = [
'FlaxRobertaForCausalLM',
'FlaxRobertaForMaskedLM',
'FlaxRobertaForMultipleChoice',
'FlaxRobertaForQuestionAnswering',
'FlaxRobertaForSequenceClassification',
'FlaxRobertaForTokenClassification',
'FlaxRobertaModel',
'FlaxRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)

import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer


MODEL_TYPE = 'bart'
LOAD_DENSE_INDEX = True


@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained('yjernite/retribert-base-uncased')
        qar_model = AutoModel.from_pretrained('yjernite/retribert-base-uncased').to('cuda:0')
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == 'bart':
        s2s_tokenizer = AutoTokenizer.from_pretrained('yjernite/bart_eli5')
        s2s_model = AutoModelForSeq2SeqLM.from_pretrained('yjernite/bart_eli5').to('cuda:0')
        save_dict = torch.load('seq2seq_models/eli5_bart_model_blm_2.pth')
        s2s_model.load_state_dict(save_dict['model'])
        _ = s2s_model.eval()
    else:
        s2s_tokenizer, s2s_model = make_qa_s2s_model(
            model_name='t5-small', from_file='seq2seq_models/eli5_t5_model_1024_4.pth', device='cuda:0'
        )
    return (qar_tokenizer, qar_model, s2s_tokenizer, s2s_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path='wiki_snippets', name='wiki40b_en_100_0')['train']
        wiki40b_passage_reps = np.memmap(
            'wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat',
            dtype='float32',
            mode='r',
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{'host': 'localhost', 'port': '9200'}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)


@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset('eli5', name='LFQA_reddit')
    eli5_train = eli5['train_eli5']
    eli5_train_q_reps = np.memmap(
        'eli5_questions_reps.dat', dtype='float32', mode='r', shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)


passages, gpu_dense_index, es_client = load_indexes()
qar_tokenizer, qar_model, s2s_tokenizer, s2s_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples


def make_support(question, source='wiki40b', method='dense', n_results=10):
    if source == 'none':
        support_doc, hit_lst = (' <P> '.join(['' for _ in range(11)]).strip(), [])
    else:
        if method == 'dense':
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, passages, gpu_dense_index, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question,
                es_client,
                index_name='english_wiki40b_snippets_100w',
                n_results=n_results,
            )
    support_list = [
        (res['article_title'], res['section_title'].strip(), res['score'], res['passage_text']) for res in hit_lst
    ]
    question_doc = 'question: {} context: {}'.format(question, support_doc)
    return question_doc, support_list


@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, s2s_model, s2s_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device='cuda:0',
        )[0]
    return (answer, support_list)
st.title('Long Form Question Answering with ELI5')

# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = """
<html>
  <head>
    <style>
      .img-container {
        padding-left: 90px;
        padding-right: 90px;
        padding-top: 50px;
        padding-bottom: 50px;
        background-color: #f0f3f9;
      }
    </style>
  </head>
  <body>
    <span class="img-container"> <!-- Inline parent element -->
      %s
    </span>
  </body>
</html>
""" % (
    header_html,
)
st.sidebar.markdown(
    header_full,
    unsafe_allow_html=True,
)

# Long Form QA with ELI5 and Wikipedia
description = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)

action_list = [
    'Answer the question',
    'View the retrieved document only',
    'View the most similar ELI5 question and answer',
    'Show me everything, please!',
]
demo_options = st.sidebar.checkbox('Demo options')
if demo_options:
    action_st = st.sidebar.selectbox(
        '',
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        '',
        ['Show full text of passages', 'Show passage section titles'],
        index=0,
    )
    show_passages = show_type == 'Show full text of passages'
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox('Retrieval options')
if retrieval_options:
    retriever_info = """
    ### Information retriever options

    The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
    trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
    The answer is then generated by a sequence to sequence model which takes the question and retrieved document as input.
    """
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none'])
    index_type = st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed'])
else:
    wiki_source = 'wiki40b'
    index_type = 'dense'

sampled = 'beam'
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox('Generation options')
if generate_options:
    generate_info = """
    ### Answer generation options

    The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
    weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
    **beam** search, or **sample** from the decoder's output probabilities.
    """
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled'])
    min_len = st.sidebar.slider(
        'Minimum generation length', min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        'Maximum generation length', min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == 'beam':
        n_beams = st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
__snake_case :Dict = [
'''<MY QUESTION>''',
'''How do people make chocolate?''',
'''Why do we get a fever when we are sick?''',
'''How can different animals perceive different colors?''',
'''What is natural language processing?''',
'''What\'s the best way to treat a sunburn?''',
'''What exactly are vitamins ?''',
'''How does nuclear energy provide electricity?''',
'''What\'s the difference between viruses and bacteria?''',
'''Why are flutes classified as woodwinds when most of them are made out of metal ?''',
'''Why do people like drinking coffee even though it tastes so bad?''',
'''What happens when wine ages? How does it make the wine taste better?''',
'''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''',
'''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''',
'''How does New Zealand have so many large bird predators?''',
]
__snake_case :int = st.selectbox(
'''What would you like to ask? ---- select <MY QUESTION> to enter a new query''',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
__snake_case :Optional[int] = st.text_input('''Enter your question here:''', '''''')
else:
__snake_case :Optional[int] = question_s
if st.button('''Show me!'''):
if action in [0, 1, 3]:
if index_type == "mixed":
__snake_case ,__snake_case :int = make_support(question, source=wiki_source, method='''dense''', n_results=10)
__snake_case ,__snake_case :Optional[int] = make_support(question, source=wiki_source, method='''sparse''', n_results=10)
__snake_case :Optional[Any] = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
__snake_case :Union[str, Any] = support_list[:10]
__snake_case :Optional[int] = '''<P> ''' + ''' <P> '''.join([res[-1] for res in support_list])
else:
__snake_case ,__snake_case :Tuple = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
__snake_case ,__snake_case :Optional[int] = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == '''sampled'''),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
        st.markdown('''### The model-generated answer is:''')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''')
for i, res in enumerate(support_list):
__snake_case :Dict = '''https://en.wikipedia.org/wiki/{}'''.format(res[0].replace(''' ''', '''_'''))
__snake_case :int = res[1].strip()
if sec_titles == "":
__snake_case :List[Any] = '''[{}]({})'''.format(res[0], wiki_url)
else:
__snake_case :Optional[int] = sec_titles.split(''' & ''')
__snake_case :str = ''' & '''.join(
['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list]
)
st.markdown(
'''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True
)
if action in [2, 3]:
__snake_case :str = find_nearest_training(question)
__snake_case :str = nn_train_list[0]
st.markdown(
'''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title'''])
)
__snake_case :Optional[Any] = [
'''{}. {}'''.format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != '''''']))
for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score''']))
if i == 0 or sc > 2
]
st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st)))
__snake_case :Tuple = '''
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
'''
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
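# A minimal sketch of the retrieve-then-generate loop the widgets above drive,
# assuming `make_support` and `answer_question` (defined earlier in the full
# script, not shown here, together with the `sas_model` / `sas_tokenizer`
# globals) behave exactly as they are called above, with `make_support`
# returning (question_doc, support_list).
def run_lfqa_once(question, wiki_source="wiki40b", index_type="dense"):
    # Stage 1: retrieve supporting Wikipedia passages, folded into one document.
    question_doc, support_list = make_support(
        question, source=wiki_source, method=index_type, n_results=10
    )
    # Stage 2: generate a long-form answer conditioned on question + passages.
    answer, _ = answer_question(
        question_doc,
        sas_model,
        sas_tokenizer,
        min_len=64,
        max_len=256,
        sampling=False,
        n_beams=2,
        top_p=0.95,
        temp=0.7,
    )
    return answer, support_list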
| 49
| 0
|
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class __a ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
_UpperCAmelCase = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
_UpperCAmelCase = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
model.to(_SCREAMING_SNAKE_CASE )
from datasets import load_dataset
_UpperCAmelCase = load_dataset('nielsr/rvlcdip-demo' )
_UpperCAmelCase = dataset['train'][0]['image'].convert('RGB' )
_UpperCAmelCase = image_processor(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).to(_SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = outputs.logits
_UpperCAmelCase = torch.Size((1, 16) )
self.assertEqual(logits.shape , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = torch.tensor(
[-0.4158, -0.4092, -0.4347] , device=_SCREAMING_SNAKE_CASE , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 360
|
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class __a :
_a : Dict = BlenderbotConfig
_a : Dict = {}
_a : Union[str, Any] = 'gelu'
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=20 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=0 , ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = pad_token_id
_UpperCAmelCase = bos_token_id
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_UpperCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_UpperCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_UpperCAmelCase = prepare_blenderbot_inputs_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return config, inputs_dict
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
_UpperCAmelCase = TFBlenderbotModel(config=_SCREAMING_SNAKE_CASE ).get_decoder()
_UpperCAmelCase = inputs_dict['input_ids']
_UpperCAmelCase = input_ids[:1, :]
_UpperCAmelCase = inputs_dict['attention_mask'][:1, :]
_UpperCAmelCase = inputs_dict['head_mask']
_UpperCAmelCase = 1
# first forward pass
_UpperCAmelCase = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , head_mask=_SCREAMING_SNAKE_CASE , use_cache=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase , _UpperCAmelCase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_UpperCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
        _UpperCAmelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
# append to next input_ids and
_UpperCAmelCase = tf.concat([input_ids, next_tokens] , axis=-1 )
_UpperCAmelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_UpperCAmelCase = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )[0]
_UpperCAmelCase = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_UpperCAmelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_UpperCAmelCase = output_from_no_past[:, -3:, random_slice_idx]
_UpperCAmelCase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , rtol=1e-3 )
def prepare_blenderbot_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ) -> dict:
    '''Builds a full model-input dict, deriving any masks that were not supplied.'''
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class __a ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
_a : List[Any] = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
_a : List[str] = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
_a : List[str] = (
{
'conversational': TFBlenderbotForConditionalGeneration,
'feature-extraction': TFBlenderbotModel,
'summarization': TFBlenderbotForConditionalGeneration,
'text2text-generation': TFBlenderbotForConditionalGeneration,
'translation': TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
_a : Dict = True
_a : int = False
_a : Union[str, Any] = False
def UpperCAmelCase__ ( self ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = TFBlenderbotModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_SCREAMING_SNAKE_CASE )
@require_tokenizers
@require_tf
class __a ( unittest.TestCase ):
_a : int = ['My friends are cool but they eat too many carbs.']
_a : List[Any] = 'facebook/blenderbot-400M-distill'
@cached_property
def UpperCAmelCase__ ( self ) -> List[Any]:
"""simple docstring"""
return BlenderbotTokenizer.from_pretrained(self.model_name )
@cached_property
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
        _UpperCAmelCase = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name )
return model
@slow
def UpperCAmelCase__ ( self ) -> str:
"""simple docstring"""
_UpperCAmelCase = self.tokenizer(self.src_text , return_tensors='tf' )
_UpperCAmelCase = self.model.generate(
model_inputs.input_ids , )
_UpperCAmelCase = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=_SCREAMING_SNAKE_CASE )[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
| 185
| 0
|
"""simple docstring"""
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
__lowerCamelCase = get_logger(__name__)
__lowerCamelCase = R"\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n"
class UpperCamelCase__:
@add_start_docstrings(__UpperCAmelCase )
def __call__( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> jnp.ndarray:
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class UpperCamelCase__:
@add_start_docstrings(__UpperCAmelCase )
def __call__( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> jnp.ndarray:
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class UpperCamelCase__( __A ):
@add_start_docstrings(__UpperCAmelCase )
def __call__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,**__UpperCAmelCase ) -> jnp.ndarray:
for processor in self:
A__ = inspect.signature(processor.__call__ ).parameters
if len(__UpperCAmelCase ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
f'''Make sure that all the required parameters: {list(function_args.keys() )} for '''
f'''{processor.__class__} are passed to the logits processor.''' )
A__ = processor(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,**__UpperCAmelCase )
else:
A__ = processor(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase )
return scores
class UpperCamelCase__( __A ):
def __init__( self ,__UpperCAmelCase ) -> Tuple:
if not isinstance(__UpperCAmelCase ,__UpperCAmelCase ) or not (temperature > 0):
raise ValueError(f'''`temperature` has to be a strictly positive float, but is {temperature}''' )
A__ = temperature
def __call__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> jnp.ndarray:
A__ = scores / self.temperature
return scores
class UpperCamelCase__( __A ):
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase = -float('Inf' ) ,__UpperCAmelCase = 1 ) -> List[Any]:
if not isinstance(__UpperCAmelCase ,__UpperCAmelCase ) or (top_p < 0 or top_p > 1.0):
raise ValueError(f'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' )
if not isinstance(__UpperCAmelCase ,__UpperCAmelCase ) or (min_tokens_to_keep < 1):
raise ValueError(f'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' )
A__ = top_p
A__ = filter_value
A__ = min_tokens_to_keep
def __call__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> jnp.ndarray:
A__ , A__ = lax.top_k(__UpperCAmelCase ,scores.shape[-1] )
A__ = jnp.full_like(__UpperCAmelCase ,self.filter_value )
A__ = jax.nn.softmax(__UpperCAmelCase ,axis=-1 ).cumsum(axis=-1 )
A__ = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
A__ = jnp.roll(__UpperCAmelCase ,1 )
score_mask |= score_mask.at[:, 0].set(__UpperCAmelCase )
# min tokens to keep
A__ = score_mask.at[:, : self.min_tokens_to_keep].set(__UpperCAmelCase )
A__ = jnp.where(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase )
A__ = jax.lax.sort_key_val(__UpperCAmelCase ,__UpperCAmelCase )[-1]
return next_scores
class UpperCamelCase__( __A ):
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase = -float('Inf' ) ,__UpperCAmelCase = 1 ) -> str:
if not isinstance(__UpperCAmelCase ,__UpperCAmelCase ) or top_k <= 0:
raise ValueError(f'''`top_k` has to be a strictly positive integer, but is {top_k}''' )
A__ = max(__UpperCAmelCase ,__UpperCAmelCase )
A__ = filter_value
def __call__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> jnp.ndarray:
A__ , A__ = scores.shape
A__ = jnp.full(batch_size * vocab_size ,self.filter_value )
A__ = min(self.top_k ,scores.shape[-1] ) # Safety check
A__ , A__ = lax.top_k(__UpperCAmelCase ,__UpperCAmelCase )
A__ = jnp.broadcast_to((jnp.arange(__UpperCAmelCase ) * vocab_size)[:, None] ,(batch_size, topk) ).flatten()
A__ = topk_scores.flatten()
A__ = topk_indices.flatten() + shift
A__ = next_scores_flat.at[topk_indices_flat].set(__UpperCAmelCase )
A__ = next_scores_flat.reshape(__UpperCAmelCase ,__UpperCAmelCase )
return next_scores
class UpperCamelCase__( __A ):
def __init__( self ,__UpperCAmelCase ) -> Optional[int]:
A__ = bos_token_id
def __call__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> jnp.ndarray:
A__ = jnp.full(scores.shape ,-float('inf' ) )
A__ = 1 - jnp.bool_(cur_len - 1 )
A__ = jnp.where(__UpperCAmelCase ,new_scores.at[:, self.bos_token_id].set(0 ) ,__UpperCAmelCase )
return scores
class UpperCamelCase__( __A ):
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> Optional[int]:
A__ = max_length
A__ = eos_token_id
def __call__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> jnp.ndarray:
A__ = jnp.full(scores.shape ,-float('inf' ) )
A__ = 1 - jnp.bool_(cur_len - self.max_length + 1 )
A__ = jnp.where(__UpperCAmelCase ,new_scores.at[:, self.eos_token_id].set(0 ) ,__UpperCAmelCase )
return scores
class UpperCamelCase__( __A ):
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> int:
if not isinstance(__UpperCAmelCase ,__UpperCAmelCase ) or min_length < 0:
raise ValueError(f'''`min_length` has to be a positive integer, but is {min_length}''' )
if not isinstance(__UpperCAmelCase ,__UpperCAmelCase ) or eos_token_id < 0:
raise ValueError(f'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' )
A__ = min_length
A__ = eos_token_id
def __call__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> jnp.ndarray:
# create boolean flag to decide if min length penalty should be applied
A__ = 1 - jnp.clip(cur_len - self.min_length ,0 ,1 )
A__ = jnp.where(__UpperCAmelCase ,scores.at[:, self.eos_token_id].set(-float('inf' ) ) ,__UpperCAmelCase )
return scores
class UpperCamelCase__( __A ):
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> Union[str, Any]:
A__ = list(__UpperCAmelCase )
A__ = begin_index
def __call__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> str:
A__ = 1 - jnp.bool_(cur_len - self.begin_index )
A__ = jnp.where(__UpperCAmelCase ,scores.at[:, self.begin_suppress_tokens].set(-float('inf' ) ) ,__UpperCAmelCase )
return scores
class UpperCamelCase__( __A ):
def __init__( self ,__UpperCAmelCase ) -> Any:
A__ = list(__UpperCAmelCase )
def __call__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> jnp.ndarray:
A__ = scores.at[..., self.suppress_tokens].set(-float('inf' ) )
return scores
class UpperCamelCase__( __A ):
def __init__( self ,__UpperCAmelCase ) -> Optional[int]:
A__ = dict(__UpperCAmelCase )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
        A__ = jnp.ones((max(force_token_map.keys() ) + 1) ,dtype=jnp.int32 ) * -1
for index, token in force_token_map.items():
if token is not None:
A__ = force_token_array.at[index].set(__UpperCAmelCase )
        A__ = jnp.int32(__UpperCAmelCase )
def __call__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> jnp.ndarray:
def _force_token(__UpperCAmelCase ):
A__ = scores.shape[0]
A__ = self.force_token_array[generation_idx]
A__ = jnp.ones_like(__UpperCAmelCase ,dtype=scores.dtype ) * -float('inf' )
A__ = jnp.zeros((batch_size, 1) ,dtype=scores.dtype )
A__ = lax.dynamic_update_slice(__UpperCAmelCase ,__UpperCAmelCase ,(0, current_token) )
return new_scores
A__ = lax.cond(
cur_len >= self.force_token_array.shape[0] ,lambda: scores ,lambda: lax.cond(
self.force_token_array[cur_len] >= 0 ,lambda: _force_token(__UpperCAmelCase ) ,lambda: scores ,) ,)
return scores
class UpperCamelCase__( __A ):
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Optional[Any]:
A__ = generate_config.eos_token_id
A__ = generate_config.no_timestamps_token_id
A__ = generate_config.no_timestamps_token_id + 1
A__ = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(__UpperCAmelCase ,'max_initial_timestamp_index' ):
A__ = generate_config.max_initial_timestamp_index
else:
A__ = model_config.vocab_size
if self.max_initial_timestamp_index is None:
A__ = model_config.vocab_size
def __call__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> str:
# suppress <|notimestamps|> which is handled by without_timestamps
A__ = scores.at[:, self.no_timestamps_token_id].set(-float('inf' ) )
def handle_pairs(__UpperCAmelCase ,__UpperCAmelCase ):
A__ = jnp.where((cur_len - self.begin_index) >= 1 ,__UpperCAmelCase ,__UpperCAmelCase )
A__ = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin ,True and last_was_timestamp ,__UpperCAmelCase ,)
A__ = jnp.where((cur_len - self.begin_index) < 2 ,__UpperCAmelCase ,__UpperCAmelCase )
A__ = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin ,__UpperCAmelCase ,__UpperCAmelCase ,)
return jnp.where(
__UpperCAmelCase ,jnp.where(
penultimate_was_timestamp > 0 ,scores_k.at[self.timestamp_begin :].set(-float('inf' ) ) ,scores_k.at[: self.eos_token_id].set(-float('inf' ) ) ,) ,__UpperCAmelCase ,)
A__ = jax.vmap(__UpperCAmelCase )(__UpperCAmelCase ,__UpperCAmelCase )
A__ = jnp.where(cur_len == self.begin_index ,__UpperCAmelCase ,__UpperCAmelCase )
A__ = jnp.where(
self.max_initial_timestamp_index is not None ,True and apply_max_initial_timestamp ,__UpperCAmelCase ,)
A__ = self.timestamp_begin + self.max_initial_timestamp_index
A__ = jnp.where(
__UpperCAmelCase ,scores.at[:, last_allowed + 1 :].set(-float('inf' ) ) ,__UpperCAmelCase ,)
# if sum of probability over timestamps is above any other token, sample timestamp
A__ = jax.nn.log_softmax(__UpperCAmelCase ,axis=-1 )
def handle_cumulative_probs(__UpperCAmelCase ,__UpperCAmelCase ):
A__ = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] ,axis=-1 )
A__ = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob ,scores_k.at[: self.timestamp_begin].set(-float('inf' ) ) ,__UpperCAmelCase ,)
A__ = jax.vmap(__UpperCAmelCase )(__UpperCAmelCase ,__UpperCAmelCase )
return scores
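# A short composition sketch for the processors above. They mirror transformers'
# Flax logits warpers; the real library names are used here (this assumes a
# recent transformers install with Flax extras, where these classes live under
# `transformers.generation`).
from transformers.generation import (
    FlaxLogitsProcessorList,
    FlaxTemperatureLogitsWarper,
    FlaxTopKLogitsWarper,
    FlaxTopPLogitsWarper,
)

warpers = FlaxLogitsProcessorList(
    [FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(50), FlaxTopPLogitsWarper(0.95)]
)
input_ids = jnp.zeros((1, 4), dtype=jnp.int32)
logits = jnp.log(jnp.array([[0.5, 0.3, 0.1, 0.05, 0.05]]))
# Each warper is applied in order; tokens cut by top-k / top-p end up at -inf.
filtered = warpers(input_ids, logits, 4)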
| 221
|
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
"huggingface/time-series-transformer-tourism-monthly": (
"https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class UpperCamelCase__( __A ):
lowerCAmelCase__ : List[Any] = 'time_series_transformer'
lowerCAmelCase__ : Dict = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
    def __init__( self , prediction_length=None , context_length=None , distribution_output="student_t" , loss="nll" , input_size=1 , lags_sequence=[1, 2, 3, 4, 5, 6, 7] , scaling="mean" , num_dynamic_real_features=0 , num_static_categorical_features=0 , num_static_real_features=0 , num_time_features=0 , cardinality=None , embedding_dimension=None , encoder_ffn_dim=32 , decoder_ffn_dim=32 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_layers=2 , decoder_layers=2 , is_encoder_decoder=True , activation_function="gelu" , d_model=64 , dropout=0.1 , encoder_layerdrop=0.1 , decoder_layerdrop=0.1 , attention_dropout=0.1 , activation_dropout=0.1 , num_parallel_samples=100 , init_std=0.02 , use_cache=True , **kwargs , ) -> None:
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality ) != num_static_categorical_features:
                raise ValueError(
                    'The cardinality should be a list of the same length as `num_static_categorical_features`' )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension ) != num_static_categorical_features:
                raise ValueError(
                    'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence ) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
@property
    def _number_of_features( self ) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
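# A quick numeric check of the feature-size arithmetic above, using the real
# transformers class this file mirrors (this assumes a version whose
# `_number_of_features` matches the property shown, i.e. includes input_size * 2
# for the loc/scale features).
from transformers import TimeSeriesTransformerConfig

ts_config = TimeSeriesTransformerConfig(
    prediction_length=24,
    num_static_categorical_features=1,
    cardinality=[10],
    embedding_dimension=[5],
    num_time_features=2,
)
# _number_of_features = 5 (embedding) + 0 + 2 (time) + 0 + 2 (loc/scale) = 9
# feature_size = input_size * len(lags_sequence) + 9 = 1 * 7 + 9 = 16
assert ts_config.feature_size == 16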
| 221
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase =logging.get_logger(__name__)
UpperCAmelCase ={
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
),
"distilbert-base-uncased-finetuned-sst-2-english": (
"https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
),
}
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowerCamelCase = '''distilbert'''
_lowerCamelCase = {
'''hidden_size''': '''dim''',
'''num_attention_heads''': '''n_heads''',
'''num_hidden_layers''': '''n_layers''',
}
def __init__( self ,lowerCamelCase_=3_0_5_2_2 ,lowerCamelCase_=5_1_2 ,lowerCamelCase_=False ,lowerCamelCase_=6 ,lowerCamelCase_=1_2 ,lowerCamelCase_=7_6_8 ,lowerCamelCase_=4 * 7_6_8 ,lowerCamelCase_=0.1 ,lowerCamelCase_=0.1 ,lowerCamelCase_="gelu" ,lowerCamelCase_=0.02 ,lowerCamelCase_=0.1 ,lowerCamelCase_=0.2 ,lowerCamelCase_=0 ,**lowerCamelCase_ ,) -> Dict:
A = vocab_size
A = max_position_embeddings
A = sinusoidal_pos_embds
A = n_layers
A = n_heads
A = dim
A = hidden_dim
A = dropout
A = attention_dropout
A = activation
A = initializer_range
A = qa_dropout
A = seq_classif_dropout
super().__init__(**lowerCamelCase_ ,pad_token_id=lowerCamelCase_ )
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def UpperCamelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
A = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
A = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 353
|
"""simple docstring"""
def match_pattern(input_string: str, pattern: str) -> bool:
    """Return True if `input_string` fully matches `pattern`, where "." matches any
    single character and "*" repeats the preceding element zero or more times."""
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]
    # since string of zero length matches pattern of zero length
    dp[0][0] = 1
    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0
    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0
    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
    input_string = "aab"
    pattern = "c*a*b"
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f"""{input_string} matches the given pattern {pattern}""")
else:
print(f"""{input_string} does not match with the given pattern {pattern}""")
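# A few extra illustrative cases for the DP matcher above:
# "." matches any single character, "*" repeats the previous element 0+ times.
assert match_pattern("abc", "a.c")  # "." consumes "b"
assert not match_pattern("abc", "a*c")  # nothing ever matches "b"
assert match_pattern("", ".*")  # ".*" may match the empty string
assert not match_pattern("aab", "c*a*")  # trailing "b" is left unmatched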
| 77
| 0
|
"""simple docstring"""
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
lowerCAmelCase = None
try:
import msvcrt
except ImportError:
lowerCAmelCase = None
try:
import fcntl
except ImportError:
lowerCAmelCase = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
lowerCAmelCase = OSError
# Data
# ------------------------------------------------
lowerCAmelCase = [
"Timeout",
"BaseFileLock",
"WindowsFileLock",
"UnixFileLock",
"SoftFileLock",
"FileLock",
]
lowerCAmelCase = "3.0.12"
lowerCAmelCase = None
def lowerCAmelCase_ ( ) ->Union[str, Any]:
global _logger
lowerCamelCase__ : Tuple =_logger or logging.getLogger(__name__ )
return _logger
class A_ ( UpperCamelCase__ ):
"""simple docstring"""
def __init__( self :List[str] , lowerCamelCase_ :Any ):
"""simple docstring"""
lowerCamelCase__ : str =lock_file
return None
def __str__( self :List[str] ):
"""simple docstring"""
lowerCamelCase__ : Dict =f"""The file lock '{self.lock_file}' could not be acquired."""
return temp
class A_ :
"""simple docstring"""
def __init__( self :str , lowerCamelCase_ :str ):
"""simple docstring"""
lowerCamelCase__ : str =lock
return None
def __enter__( self :Dict ):
"""simple docstring"""
return self.lock
def __exit__( self :int , lowerCamelCase_ :int , lowerCamelCase_ :List[str] , lowerCamelCase_ :Any ):
"""simple docstring"""
self.lock.release()
return None
class A_ :
"""simple docstring"""
def __init__( self :List[str] , lowerCamelCase_ :Dict , lowerCamelCase_ :Optional[int]=-1 , lowerCamelCase_ :Optional[int]=None ):
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] =max_filename_length if max_filename_length is not None else 255
# Hash the filename if it's too long
lowerCamelCase__ : int =self.hash_filename_if_too_long(lowercase_ , lowercase_ )
# The path to the lock file.
lowerCamelCase__ : Union[str, Any] =lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
lowerCamelCase__ : Dict =None
# The default timeout value.
lowerCamelCase__ : List[Any] =timeout
# We use this lock primarily for the lock counter.
lowerCamelCase__ : int =threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
lowerCamelCase__ : List[str] =0
return None
@property
def UpperCAmelCase__ ( self :Optional[int] ):
"""simple docstring"""
return self._lock_file
@property
def UpperCAmelCase__ ( self :Union[str, Any] ):
"""simple docstring"""
return self._timeout
@timeout.setter
def UpperCAmelCase__ ( self :List[Any] , lowerCamelCase_ :str ):
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] =float(lowercase_ )
return None
def UpperCAmelCase__ ( self :Dict ):
"""simple docstring"""
raise NotImplementedError()
def UpperCAmelCase__ ( self :Optional[Any] ):
"""simple docstring"""
raise NotImplementedError()
@property
def UpperCAmelCase__ ( self :List[Any] ):
"""simple docstring"""
return self._lock_file_fd is not None
def UpperCAmelCase__ ( self :List[str] , lowerCamelCase_ :List[str]=None , lowerCamelCase_ :Tuple=0.05 ):
"""simple docstring"""
if timeout is None:
lowerCamelCase__ : int =self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
lowerCamelCase__ : Any =id(self )
lowerCamelCase__ : Tuple =self._lock_file
lowerCamelCase__ : Optional[int] =time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(f"""Attempting to acquire lock {lock_id} on {lock_filename}""" )
self._acquire()
if self.is_locked:
logger().debug(f"""Lock {lock_id} acquired on {lock_filename}""" )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(f"""Timeout on acquiring lock {lock_id} on {lock_filename}""" )
raise Timeout(self._lock_file )
else:
logger().debug(
f"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" )
time.sleep(lowercase_ )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
lowerCamelCase__ : Dict =max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def UpperCAmelCase__ ( self :Tuple , lowerCamelCase_ :Optional[int]=False ):
"""simple docstring"""
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
lowerCamelCase__ : List[str] =id(self )
lowerCamelCase__ : List[Any] =self._lock_file
logger().debug(f"""Attempting to release lock {lock_id} on {lock_filename}""" )
self._release()
lowerCamelCase__ : Union[str, Any] =0
logger().debug(f"""Lock {lock_id} released on {lock_filename}""" )
return None
def __enter__( self :int ):
"""simple docstring"""
self.acquire()
return self
def __exit__( self :Tuple , lowerCamelCase_ :Tuple , lowerCamelCase_ :str , lowerCamelCase_ :Any ):
"""simple docstring"""
self.release()
return None
def __del__( self :str ):
"""simple docstring"""
self.release(force=lowercase_ )
return None
def UpperCAmelCase__ ( self :str , lowerCamelCase_ :str , lowerCamelCase_ :int ):
"""simple docstring"""
lowerCamelCase__ : Dict =os.path.basename(lowercase_ )
if len(lowercase_ ) > max_length and max_length > 0:
lowerCamelCase__ : List[Any] =os.path.dirname(lowercase_ )
lowerCamelCase__ : Dict =str(hash(lowercase_ ) )
lowerCamelCase__ : Any =filename[: max_length - len(lowercase_ ) - 8] + '...' + hashed_filename + '.lock'
return os.path.join(lowercase_ , lowercase_ )
else:
return path
class A_ ( UpperCamelCase__ ):
"""simple docstring"""
def __init__( self :Dict , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Optional[Any]=-1 , lowerCamelCase_ :Dict=None ):
"""simple docstring"""
from .file_utils import relative_to_absolute_path
super().__init__(lowercase_ , timeout=lowercase_ , max_filename_length=lowercase_ )
lowerCamelCase__ : Any ='\\\\?\\' + relative_to_absolute_path(self.lock_file )
def UpperCAmelCase__ ( self :Any ):
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] =os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
lowerCamelCase__ : int =os.open(self._lock_file , lowercase_ )
except OSError:
pass
else:
try:
msvcrt.locking(lowercase_ , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(lowercase_ )
else:
lowerCamelCase__ : List[Any] =fd
return None
def UpperCAmelCase__ ( self :int ):
"""simple docstring"""
lowerCamelCase__ : str =self._lock_file_fd
lowerCamelCase__ : Union[str, Any] =None
msvcrt.locking(lowercase_ , msvcrt.LK_UNLCK , 1 )
os.close(lowercase_ )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class A_ ( UpperCamelCase__ ):
"""simple docstring"""
def __init__( self :Optional[Any] , lowerCamelCase_ :Tuple , lowerCamelCase_ :List[str]=-1 , lowerCamelCase_ :List[Any]=None ):
"""simple docstring"""
lowerCamelCase__ : Optional[Any] =os.statvfs(os.path.dirname(lowercase_ ) ).f_namemax
super().__init__(lowercase_ , timeout=lowercase_ , max_filename_length=lowercase_ )
def UpperCAmelCase__ ( self :Tuple ):
"""simple docstring"""
lowerCamelCase__ : Dict =os.O_RDWR | os.O_CREAT | os.O_TRUNC
lowerCamelCase__ : int =os.open(self._lock_file , lowercase_ )
try:
fcntl.flock(lowercase_ , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(lowercase_ )
else:
lowerCamelCase__ : List[Any] =fd
return None
def UpperCAmelCase__ ( self :List[Any] ):
"""simple docstring"""
lowerCamelCase__ : Tuple =self._lock_file_fd
lowerCamelCase__ : int =None
fcntl.flock(lowercase_ , fcntl.LOCK_UN )
os.close(lowercase_ )
return None
class A_ ( UpperCamelCase__ ):
"""simple docstring"""
def UpperCAmelCase__ ( self :str ):
"""simple docstring"""
lowerCamelCase__ : List[str] =os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
lowerCamelCase__ : Optional[Any] =os.open(self._lock_file , lowercase_ )
except OSError:
pass
else:
lowerCamelCase__ : Any =fd
return None
def UpperCAmelCase__ ( self :Union[str, Any] ):
"""simple docstring"""
os.close(self._lock_file_fd )
lowerCamelCase__ : Dict =None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
lowerCAmelCase = None
if msvcrt:
lowerCAmelCase = WindowsFileLock
elif fcntl:
lowerCAmelCase = UnixFileLock
else:
lowerCAmelCase = SoftFileLock
if warnings is not None:
warnings.warn("""only soft file lock is available""")
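# A minimal usage sketch, assuming the original (unmangled) public names this
# module exposes: `FileLock` is the platform-appropriate alias picked just above,
# and `Timeout` is the exception raised when acquisition times out.
lock = FileLock("high_ground.txt.lock", timeout=5)
with lock:  # waits up to 5 seconds, then raises Timeout
    with open("high_ground.txt", "a") as f:
        f.write("hello")
# Locks nest: each acquire() bumps a counter, and only the outermost release()
# (counter back to 0) actually unlocks the file.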
| 126
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyV22Pipeline, KandinskyV22PriorPipeline, UNet2DConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCAmelCase ( UpperCamelCase__ , unittest.TestCase ):
    __lowercase = KandinskyV22Pipeline
__lowercase = [
"""image_embeds""",
"""negative_image_embeds""",
]
__lowercase = ["""image_embeds""", """negative_image_embeds"""]
__lowercase = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
__lowercase = False
@property
def UpperCAmelCase_ ( self :Optional[Any] )-> str:
return 32
@property
def UpperCAmelCase_ ( self :int )-> List[Any]:
return 32
@property
def UpperCAmelCase_ ( self :Optional[Any] )-> Dict:
return self.time_input_dim
@property
def UpperCAmelCase_ ( self :Any )-> Union[str, Any]:
return self.time_input_dim * 4
@property
def UpperCAmelCase_ ( self :Any )-> List[Any]:
        return 100
@property
def UpperCAmelCase_ ( self :Union[str, Any] )-> Union[str, Any]:
torch.manual_seed(0 )
A__ = {
"in_channels": 4,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
        A__ = UNet2DConditionModel(**lowercase_ )
return model
@property
def UpperCAmelCase_ ( self :Any )-> Optional[Any]:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def UpperCAmelCase_ ( self :Dict )-> Union[str, Any]:
torch.manual_seed(0 )
A__ = VQModel(**self.dummy_movq_kwargs )
return model
def UpperCAmelCase_ ( self :Dict )-> Optional[Any]:
A__ = self.dummy_unet
A__ = self.dummy_movq
A__ = DDIMScheduler(
            num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.00085 , beta_end=0.012 , clip_sample=lowercase_ , set_alpha_to_one=lowercase_ , steps_offset=1 , prediction_type="epsilon" , thresholding=lowercase_ , )
A__ = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def UpperCAmelCase_ ( self :Dict , lowercase_ :int , lowercase_ :Union[str, Any]=0 )-> Dict:
A__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
A__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
lowercase_ )
if str(lowercase_ ).startswith("mps" ):
A__ = torch.manual_seed(lowercase_ )
else:
A__ = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
A__ = {
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def UpperCAmelCase_ ( self :Optional[Any] )-> str:
A__ = "cpu"
A__ = self.get_dummy_components()
A__ = self.pipeline_class(**lowercase_ )
A__ = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
A__ = pipe(**self.get_dummy_inputs(lowercase_ ) )
A__ = output.images
A__ = pipe(
**self.get_dummy_inputs(lowercase_ ) , return_dict=lowercase_ , )[0]
A__ = image[0, -3:, -3:, -1]
A__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A__ = np.array(
            [0.6237976, 1.0, 0.36441332, 1.0, 0.70639634, 0.29877186, 0.85652125, 0.5216843, 0.54454046] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
def UpperCAmelCase_ ( self :Dict )-> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self :Any )-> List[Any]:
A__ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy" )
        A__ = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.float16 )
        pipe_prior.to(lowercase_ )
        A__ = KandinskyV22Pipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.float16 )
A__ = pipeline.to(lowercase_ )
pipeline.set_progress_bar_config(disable=lowercase_ )
A__ = "red cat, 4k photo"
A__ = torch.Generator(device="cuda" ).manual_seed(0 )
A__, A__ = pipe_prior(
lowercase_ , generator=lowercase_ , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
A__ = torch.Generator(device="cuda" ).manual_seed(0 )
        A__ = pipeline(
            image_embeds=lowercase_ , negative_image_embeds=lowercase_ , generator=lowercase_ , num_inference_steps=100 , output_type="np" , )
A__ = output.images[0]
        assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(lowercase_ , lowercase_ )
| 237
| 0
|
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class __magic_name__ ( __lowercase ):
'''simple docstring'''
@staticmethod
@abstractmethod
def _lowerCAmelCase ( _a ):
"""simple docstring"""
raise NotImplementedError()
@abstractmethod
def _lowerCAmelCase ( self ):
"""simple docstring"""
raise NotImplementedError()
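# A hypothetical subclass sketch of the command interface above. The mangled base
# class mirrors transformers' BaseTransformersCLICommand (imported directly here),
# whose two hooks are `register_subcommand` (wire up argparse) and `run` (execute).
# `EchoCommand` and its flag are illustrative names, not part of the source.
from transformers.commands import BaseTransformersCLICommand

class EchoCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        echo_parser = parser.add_parser("echo", help="Print a message and exit.")
        echo_parser.add_argument("--message", type=str, default="hello")
        echo_parser.set_defaults(func=lambda args: EchoCommand(args.message))

    def __init__(self, message: str):
        self.message = message

    def run(self):
        print(self.message)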
| 350
|
"""simple docstring"""
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs("""hub/hopper-medium-v2/unet/hor32""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/unet/hor128""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/value_function""", exist_ok=True)
def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys() )}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys() )}")
    mapping = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict() , f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json" , "w" ) as f:
        json.dump(config , f )
def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }
    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys() )}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys() )}")
    mapping = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict() , "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json" , "w" ) as f:
        json.dump(config , f )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 168
| 0
|
'''simple docstring'''
def binary_multiply(a: int, b: int) -> int:
    '''Multiply a by b with shift-and-add (Russian peasant multiplication).'''
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res
def binary_mod_multiply(a: int, b: int, modulus: int) -> int:
    '''Same shift-and-add walk, but the running sum is kept reduced mod `modulus`.'''
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % modulus) + (a % modulus)) % modulus
        a += a
        b >>= 1
    return res
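# Quick checks for the two helpers above. Both walk the bits of b, doubling a at
# each step; the modular variant keeps the running sum reduced mod `modulus`.
assert binary_multiply(6, 7) == 42
assert binary_mod_multiply(6, 7, 5) == (6 * 7) % 5  # == 2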
| 80
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
lowerCamelCase__ = None
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
lowerCamelCase__ = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json""",
},
}
lowerCamelCase__ = {
"""albert-base-v1""": 512,
"""albert-large-v1""": 512,
"""albert-xlarge-v1""": 512,
"""albert-xxlarge-v1""": 512,
"""albert-base-v2""": 512,
"""albert-large-v2""": 512,
"""albert-xlarge-v2""": 512,
"""albert-xxlarge-v2""": 512,
}
lowerCamelCase__ = """▁"""
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : List[Any] =VOCAB_FILES_NAMES
__lowerCamelCase : Union[str, Any] =PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase : List[str] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase : Any =AlbertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , remove_space=True , keep_accents=False , bos_token="[CLS]" , eos_token="[SEP]" , unk_token="<unk>" , sep_token="[SEP]" , pad_token="<pad>" , cls_token="[CLS]" , mask_token="[MASK]" , **kwargs , ):
        '''simple docstring'''
        # Mask token behaves like a normal word, i.e. includes the space before it and
        # is included in the raw text, so there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token , lstrip=True , rstrip=False , normalized=False )
            if isinstance(mask_token , str )
            else mask_token
        )
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1=None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix=None ):
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
                """tokenizer.""" )
        if not os.path.isdir(save_directory ):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
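# A small sketch of the special-token layout produced above, using the real
# library class this file mirrors (AlbertTokenizerFast; `from_pretrained`
# downloads the tokenizer files, so this sketch assumes network access).
from transformers import AlbertTokenizerFast

tok = AlbertTokenizerFast.from_pretrained("albert-base-v2")
ids = tok.build_inputs_with_special_tokens([10, 11], [20, 21])
# Layout: [CLS] seq0 [SEP] seq1 [SEP]
assert ids == [tok.cls_token_id, 10, 11, tok.sep_token_id, 20, 21, tok.sep_token_id]
# Token type ids: 0 for "[CLS] seq0 [SEP]", 1 for "seq1 [SEP]".
assert tok.create_token_type_ids_from_sequences([10, 11], [20, 21]) == [0, 0, 0, 0, 1, 1, 1]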
| 302
| 0
|
"""simple docstring"""
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_multiple_size=4, hidden_act="gelu", hidden_dropout=0.0, attention_dropout=0.1, weight_tying=True, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXJapaneseConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_multiple_size=self.intermediate_multiple_size, hidden_act=self.hidden_act, hidden_dropout=self.hidden_dropout, attention_dropout=self.attention_dropout, weight_tying=self.weight_tying, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and attention_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values, output_hidden_states=True)["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class GPTNeoXJapaneseModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_generation(self):
        model_id = "abeja/gpt-neox-japanese-2.7b"
        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
        expected_outputs = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]
        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)
        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, expected_outputs)
"""simple docstring"""
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
logger = get_logger(__name__)


LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
search or log softmax for each vocabulary token when using beam search
kwargs (`Dict[str, Any]`, *optional*):
Additional logits processor specific kwargs.
Return:
`jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
"""
class FlaxLogitsProcessor:
    """Abstract base class for all logit processors that can be applied during generation."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        """Flax method for processing logits."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsWarper:
    """Abstract base class for all logit warpers that can be applied during generation with multinomial sampling."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        """Flax method for warping logits."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsProcessorList(list):
    """
    This class can be used to create a list of [`FlaxLogitsProcessor`] or [`FlaxLogitsWarper`] to subsequently
    process a `scores` input tensor. It inherits from list and adds a `__call__` method that applies each
    [`FlaxLogitsProcessor`] or [`FlaxLogitsWarper`] to the inputs in turn.
    """

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys())} for "
                        f"{processor.__class__} are passed to the logits processor."
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores
class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    """[`FlaxLogitsWarper`] for temperature (exponential scaling of the output probability distribution)."""

    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")

        self.temperature = temperature

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores


class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    """[`FlaxLogitsWarper`] performing nucleus (top-p) filtering: keeps the smallest set of tokens whose cumulative probability stays below `top_p`."""

    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")

        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])

        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p

        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)

        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)

        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]

        return next_scores
class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    """[`FlaxLogitsWarper`] that keeps only the `top_k` highest-probability vocabulary tokens."""

    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")

        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)

        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift

        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores
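# Editor's note: a minimal sketch of how the warpers above are typically composed
# (toy shapes and values assumed; not part of the original module):
#
#     warpers = FlaxLogitsProcessorList([FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(top_k=5)])
#     dummy_ids = jnp.zeros((1, 4), dtype=jnp.int32)
#     dummy_scores = jnp.arange(10, dtype=jnp.float32)[None, :]
#     warped = warpers(dummy_ids, dummy_scores, cur_len=4)  # all but the 5 highest scores become -inf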
class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] that enforces the specified token as the first generated token."""

    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)

        return scores


class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] that enforces the specified token as the last generated token when `max_length` is reached."""

    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)

        return scores
class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] enforcing a minimum length by setting the EOS probability to 0."""

    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")
        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")

        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)
        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)

        return scores


class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] suppressing a list of tokens as soon as generation starts (at `begin_index`)."""

    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)
        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)

        return scores


class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] suppressing a list of tokens at each decoding step."""

    def __init__(self, suppress_tokens: list):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))

        return scores
class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] that forces specific token ids at specific generation indices."""

    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]

            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If the current length is geq than the length of force_token_array, the processor does nothing.
            lambda: scores,
            # Otherwise, it may force a certain token.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                # Only valid (positive) tokens are forced
                lambda: _force_token(cur_len),
                # Otherwise, the processor does nothing.
                lambda: scores,
            ),
        )
        return scores
class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] that modifies the logits for Whisper generation so that sampled tokens follow the timestamp grammar."""

    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1

        self.begin_index = decoder_input_length + 1

        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|> which is handled by without_timestamps
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin,
                True and last_was_timestamp,
                False,
            )

            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin,
                True,
                penultimate_was_timestamp,
            )

            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),  # has to be a non-timestamp
                    scores_k.at[: self.eos_token_id].set(-float("inf")),  # cannot be a normal text token
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None,
            True and apply_max_initial_timestamp,
            False,
        )

        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index

        scores = jnp.where(
            apply_max_initial_timestamp,
            scores.at[:, last_allowed + 1 :].set(-float("inf")),
            scores,
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)

        return scores
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING


def _get_default_logging_level():
    """
    If the `DATASETS_VERBOSITY` env var is set to one of the valid choices, return that as the new
    default level. Otherwise, fall back to `_default_log_level`.
    """
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    # Apply our default configuration to the library root logger.
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name, defaulting to the library root logger."""
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity() -> int:
    """Return the current level for the library's root logger."""
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    """Set the level for the library's root logger."""
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_propagation() -> None:
    """Disable propagation of the library log outputs. Note that log propagation is disabled by default."""
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    """Enable propagation of the library log outputs."""
    _get_library_root_logger().propagate = True


# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return an empty function for any attribute access."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


_tqdm_active = True


class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    global _tqdm_active
    _tqdm_active = False
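# Editor's note: a minimal usage sketch (assuming this module is importable as
# `datasets.utils.logging`, as in the upstream library):
#
#     from datasets.utils import logging as ds_logging
#     ds_logging.set_verbosity_info()      # root "datasets" logger now at INFO
#     ds_logging.disable_progress_bar()    # tqdm(...) above now returns EmptyTqdm
#     logger = ds_logging.get_logger(__name__)
#     logger.info("visible at INFO level")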
'''simple docstring'''
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None

        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
            [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
            [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
            [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
            # fmt: on
        ]
    )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
            [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
            [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
            [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
            # fmt: on
        ]
    )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}


class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(self, vocab_size=32128, d_model=768, d_kv=64, d_ff=2048, expert_capacity=64, num_layers=12, num_sparse_encoder_layers=3, num_decoder_layers=12, num_sparse_decoder_layers=3, num_heads=12, num_experts=8, router_bias=False, router_jitter_noise=0.01, router_dtype="float32", router_ignore_padding_tokens=False, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-6, router_z_loss_coef=0.001, router_aux_loss_coef=0.001, initializer_factor=1.0, feed_forward_proj="relu", is_encoder_decoder=True, add_router_probs=False, use_cache=True, pad_token_id=0, eos_token_id=1, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff

        self.num_sparse_encoder_layers = num_sparse_encoder_layers

        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layers we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layers we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs)
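# Editor's note: a quick sanity sketch using the defaults above (derived from the code in
# this file, not taken from the original document):
#
#     config = SwitchTransformersConfig(num_layers=12, num_sparse_encoder_layers=3)
#     assert config.encoder_sparse_step == 4  # a sparse MoE layer every 4th encoder layer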
"""simple docstring"""
def is_power_of_two(number: int) -> bool:
    """
    Check whether `number` is a power of two using the bit trick `n & (n - 1) == 0`.
    Note that 0 also satisfies this trick and is therefore reported as a power of two.
    """
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0
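# Editor's worked example (not from the original file): a power of two has a single set
# bit, so subtracting one flips that bit and sets every bit below it.
#   16 = 0b10000, 15 = 0b01111, 16 & 15 = 0b00000 -> power of two
#   12 = 0b01100, 11 = 0b01011, 12 & 11 = 0b01000 -> not a power of two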
if __name__ == "__main__":
import doctest
doctest.testmod()
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """A Bezier curve is a weighted sum of a set of control points."""

    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."

        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(
            to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree),
        )
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
"""simple docstring"""
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN:
    def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
        """
        :param conv1_get: [size, number, step] of the convolution kernels
        :param size_p1: pooling size
        :param bp_num1: unit number of the flattened layer
        :param bp_num2: unit number of the hidden layer
        :param bp_num3: unit number of the output layer
        :param rate_w: weight learning rate
        :param rate_t: threshold learning rate
        """
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5)
            for i in range(self.conv1[1])
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1
    def save_model(self, save_path):
        # save model dict with pickle
        model_dic = {
            "num_bp1": self.num_bp1,
            "num_bp2": self.num_bp2,
            "num_bp3": self.num_bp3,
            "conv1": self.conv1,
            "step_conv1": self.step_conv1,
            "size_pooling1": self.size_pooling1,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conv1,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conv1,
            "thre_bp2": self.thre_bp2,
            "thre_bp3": self.thre_bp3,
        }
        with open(save_path, "wb") as f:
            pickle.dump(model_dic, f)

        print(f"Model saved: {save_path}")

    @classmethod
    def read_model(cls, model_path):
        # read a saved model
        with open(model_path, "rb") as f:
            model_dic = pickle.load(f)  # noqa: S301

        conv_get = model_dic.get("conv1")
        conv_get.append(model_dic.get("step_conv1"))
        size_p1 = model_dic.get("size_pooling1")
        bp1 = model_dic.get("num_bp1")
        bp2 = model_dic.get("num_bp2")
        bp3 = model_dic.get("num_bp3")
        r_w = model_dic.get("rate_weight")
        r_t = model_dic.get("rate_thre")
        # create model instance
        conv_ins = CNN(conv_get, size_p1, bp1, bp2, bp3, r_w, r_t)
        # modify model parameters
        conv_ins.w_conv1 = model_dic.get("w_conv1")
        conv_ins.wkj = model_dic.get("wkj")
        conv_ins.vji = model_dic.get("vji")
        conv_ins.thre_conv1 = model_dic.get("thre_conv1")
        conv_ins.thre_bp2 = model_dic.get("thre_bp2")
        conv_ins.thre_bp3 = model_dic.get("thre_bp3")
        return conv_ins
    def sig(self, x):
        # sigmoid activation
        return 1 / (1 + np.exp(-1 * x))

    def do_round(self, x):
        return round(x, 3)

    def convolute(self, data, convs, w_convs, thre_convs, conv_step):
        # convolution process
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data)[0]
        # get the data slices of the original image data, data_focus
        data_focus = []
        for i_focus in range(0, size_data - size_conv + 1, conv_step):
            for j_focus in range(0, size_data - size_conv + 1, conv_step):
                focus = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(focus)
        # calculate the feature map of every single kernel, and save as list of matrix
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(num_conv):
            featuremap = []
            for i_focus in range(len(data_focus)):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map]))
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus))
            featuremap = np.asmatrix(featuremap).reshape(
                size_feature_map, size_feature_map
            )
            data_featuremap.append(featuremap)

        # expand each data slice to one dimension
        focus1_list = []
        for each_focus in data_focus:
            focus1_list.extend(self._expand_mat(each_focus))
        focus_list = np.asarray(focus1_list)
        return focus_list, data_featuremap
    def pooling(self, featuremaps, size_pooling, pooling_type="average_pool"):
        # pooling process
        size_map = len(featuremaps[0])
        size_pooled = int(size_map / size_pooling)
        featuremap_pooled = []
        for i_map in range(len(featuremaps)):
            feature_map = featuremaps[i_map]
            map_pooled = []
            for i_focus in range(0, size_map, size_pooling):
                for j_focus in range(0, size_map, size_pooling):
                    focus = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(focus))
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(focus))
            map_pooled = np.asmatrix(map_pooled).reshape(size_pooled, size_pooled)
            featuremap_pooled.append(map_pooled)
        return featuremap_pooled
    def _expand(self, data):
        # expand three-dimensional data to a one-dimensional array
        data_expanded = []
        for i in range(len(data)):
            shapes = np.shape(data[i])
            data_listed = data[i].reshape(1, shapes[0] * shapes[1])
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed)
        data_expanded = np.asarray(data_expanded)
        return data_expanded

    def _expand_mat(self, data_mat):
        # expand a matrix to a one-dimensional array
        data_mat = np.asarray(data_mat)
        shapes = np.shape(data_mat)
        data_expanded = data_mat.reshape(1, shapes[0] * shapes[1])
        return data_expanded

    def _calculate_gradient_from_pool(self, out_map, pd_pool, num_map, size_map, size_pooling):
        pd_all = []
        i_pool = 0
        for i_map in range(num_map):
            pd_conv1 = np.ones((size_map, size_map))
            for i in range(0, size_map, size_pooling):
                for j in range(0, size_map, size_pooling):
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[
                        i_pool
                    ]
                    i_pool = i_pool + 1
            pd_conv2 = np.multiply(
                pd_conv1, np.multiply(out_map[i_map], (1 - out_map[i_map]))
            )
            pd_all.append(pd_conv2)
        return pd_all
    def train(self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=True):
        # model training
        print("----------------------Start Training-------------------------")
        print((" - - Shape: Train_Data  ", np.shape(datas_train)))
        print((" - - Shape: Teach_Data  ", np.shape(datas_teach)))
        rp = 0
        all_mse = []
        mse = 10000
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(f"-------------Learning Time {rp}--------------")
            for p in range(len(datas_train)):
                # print('------------Learning Image: %d--------------'%p)
                data_train = np.asmatrix(datas_train[p])
                data_teach = np.asarray(datas_teach[p])
                data_focus1, data_conved1 = self.convolute(
                    data_train, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1,
                )
                data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
                shape_featuremap1 = np.shape(data_conved1)
                data_bp_input = self._expand(data_pooled1)
                bp_out1 = data_bp_input

                bp_net_j = np.dot(bp_out1, self.vji.T) - self.thre_bp2
                bp_out2 = self.sig(bp_net_j)
                bp_net_k = np.dot(bp_out2, self.wkj.T) - self.thre_bp3
                bp_out3 = self.sig(bp_net_k)

                # --------------Model Learning ------------------------
                # calculate error and gradient---------------
                pd_k_all = np.multiply(
                    (data_teach - bp_out3), np.multiply(bp_out3, (1 - bp_out3))
                )
                pd_j_all = np.multiply(
                    np.dot(pd_k_all, self.wkj), np.multiply(bp_out2, (1 - bp_out2))
                )
                pd_i_all = np.dot(pd_j_all, self.vji)

                pd_conv1_pooled = pd_i_all / (self.size_pooling1 * self.size_pooling1)
                pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
                pd_conv1_all = self._calculate_gradient_from_pool(
                    data_conved1, pd_conv1_pooled, shape_featuremap1[0], shape_featuremap1[1], self.size_pooling1,
                )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conv1[1]):
                    pd_conv_list = self._expand_mat(pd_conv1_all[k_conv])
                    delta_w = self.rate_weight * np.dot(pd_conv_list, data_focus1)

                    self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape(
                        (self.conv1[0], self.conv1[0])
                    )
                    self.thre_conv1[k_conv] = (
                        self.thre_conv1[k_conv]
                        - np.sum(pd_conv1_all[k_conv]) * self.rate_thre
                    )
                # fully connected layer
                self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
                self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
                self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
                # calculate the sum error over all single images
                errors = np.sum(abs(data_teach - bp_out3))
                error_count += errors
                # print(' ----Teach      ', data_teach)
                # print(' ----BP_output  ', bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse)

        def draw_error():
            yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(all_mse, "+-")
            plt.plot(yplot, "r--")
            plt.xlabel("Learning Times")
            plt.ylabel("All_mse")
            plt.grid(True, alpha=0.5)
            plt.show()

        print("------------------Training Completed---------------------")
        print((" - - Training epoch: ", rp, f" - - Mse: {mse:.6f}"))
        if draw_e:
            draw_error()
        return mse
    def predict(self, datas_test):
        # model predict
        produce_out = []
        print("-------------------Start Testing-------------------------")
        print((" - - Shape: Test_Data  ", np.shape(datas_test)))
        for p in range(len(datas_test)):
            data_test = np.asmatrix(datas_test[p])
            data_focus1, data_conved1 = self.convolute(
                data_test, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1,
            )
            data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
            data_bp_input = self._expand(data_pooled1)

            bp_out1 = data_bp_input
            bp_net_j = bp_out1 * self.vji.T - self.thre_bp2
            bp_out2 = self.sig(bp_net_j)
            bp_net_k = bp_out2 * self.wkj.T - self.thre_bp3
            bp_out3 = self.sig(bp_net_k)
            produce_out.extend(bp_out3.getA().tolist())
        res = [list(map(self.do_round, each)) for each in produce_out]
        return np.asarray(res)

    def convolution(self, data):
        # return the image data after the convolution process so we can inspect it
        data_test = np.asmatrix(data)
        data_focus1, data_conved1 = self.convolute(
            data_test, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1,
        )
        data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
        return data_conved1, data_pooled1
if __name__ == "__main__":
pass
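# Editor's note: a minimal construction sketch (toy sizes assumed, not from the original
# file; for training, bp_num1 must equal the flattened pooled feature size):
#
#     # two 3x3 kernels with stride 1, 2x2 average pooling, and a 9-5-3 BP network
#     cnn = CNN(conv1_get=[3, 2, 1], size_p1=2, bp_num1=9, bp_num2=5, bp_num3=3)
#     cnn.save_model("model.pkl")
#     restored = CNN.read_model("model.pkl")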
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).
    """,
)
class FillMaskPipeline(Pipeline):
    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor):
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs
    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]

            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result
    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target, add_special_tokens=False, return_attention_mask=False, return_token_type_ids=False, max_length=1, truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids
    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}

        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids

        if top_k is not None:
            postprocess_params["top_k"] = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs
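# Editor's note: a minimal usage sketch via the standard pipeline factory (the model name
# is an assumed example, not taken from the original file):
#
#     from transformers import pipeline
#     fill_mask = pipeline("fill-mask", model="distilroberta-base")
#     fill_mask("Paris is the <mask> of France.", top_k=2)
#     # -> [{"score": ..., "token": ..., "token_str": " capital", "sequence": ...}, ...]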
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    """
    Two strings are anagrams if they are made up of the same letters, arranged
    differently (ignoring case and whitespace).

    >>> check_anagrams("Silent", "Listen")
    True
    >>> check_anagrams("This is a string", "Is this a string")
    True
    >>> check_anagrams("There", "Their")
    False
    """
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)

    # For each character in the input strings,
    # increment count in the corresponding dict entry
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt")

MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


def pil_loader(path: str):
    with open(path, "rb") as f:
        im = Image.open(f)
        return im.convert("RGB")
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
        },
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                "You must specify either a dataset name from the hub or a train and/or validation directory."
            )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default="google/vit-base-patch16-224-in21k",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: Optional[str] = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["labels"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_image_classification", model_args, data_args)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            cache_dir=model_args.cache_dir,
            task="image-classification",
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files["train"] = os.path.join(data_args.train_dir, "**")
        if data_args.validation_dir is not None:
            data_files["validation"] = os.path.join(data_args.validation_dir, "**")
        dataset = load_dataset(
            "imagefolder",
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            task="image-classification",
        )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = dataset["train"].train_test_split(data_args.train_val_split)
        dataset["train"] = split["train"]
        dataset["validation"] = split["test"]
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset["train"].features["labels"].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="image-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # Define torchvision transforms to be applied to each image.
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std)
    _train_transforms = Compose(
        [
            RandomResizedCrop(size),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize,
        ]
    )
    _val_transforms = Compose(
        [
            Resize(size),
            CenterCrop(size),
            ToTensor(),
            normalize,
        ]
    )

    def train_transforms(example_batch):
        """Apply _train_transforms across a batch."""
        example_batch["pixel_values"] = [
            _train_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]
        ]
        return example_batch

    def val_transforms(example_batch):
        """Apply _val_transforms across a batch."""
        example_batch["pixel_values"] = [_val_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]]
        return example_batch
    if training_args.do_train:
        if "train" not in dataset:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            dataset["train"] = (
                dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms)

    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            dataset["validation"] = (
                dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms)
    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=dataset["train"] if training_args.do_train else None,
        eval_dataset=dataset["validation"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "image-classification",
        "dataset": data_args.dataset_name,
        "tags": ["image-classification", "vision"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
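# A hedged invocation sketch (hypothetical paths and dataset name; the flag
# names come from HfArgumentParser over the dataclasses above plus the standard
# TrainingArguments, but verify against your transformers version):
#
#   python run_image_classification.py \
#       --dataset_name beans \
#       --output_dir ./vit-beans \
#       --remove_unused_columns False \
#       --do_train --do_eval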
| 18
|
'''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
B"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
| 168
| 0
|
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
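# A self-contained sketch of the decorator's contract (hedged: the assumed
# behavior is that `find_executable_batch_size` retries the wrapped function
# with a halved batch size whenever an out-of-memory error is raised):
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def probe(batch_size):
#       print(f"trying batch_size={batch_size}")
#       ...  # build dataloaders / run a training step at this batch size
#
#   probe()  # called with no arguments; the decorator supplies batch_size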
| 358
|
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UniPCMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "solver_type": "bh2",
        }
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        kwargs.update(forward_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_switch(self):
        # make sure that iterating over schedulers with the same config names gives the same results for defaults
        scheduler = UniPCMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3
    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            solver_order=order,
                            solver_type=solver_type,
                        )
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_solver_order_and_type(self):
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    sample = self.full_loop(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    assert not torch.isnan(sample).any(), "Samples have nan numbers"
    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)
    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2464) < 1e-3
    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.1014) < 1e-3
    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
    def test_unique_timesteps(self, **config):
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(scheduler.config.num_train_timesteps)
            assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
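# A minimal inference-style sketch of the scheduler under test (hedged: a
# random tensor stands in for a real denoising model's output), guarded so it
# only runs when the file is executed directly:
if __name__ == "__main__":
    demo_scheduler = UniPCMultistepScheduler(num_train_timesteps=1000, solver_order=2, solver_type="bh2")
    demo_scheduler.set_timesteps(10)
    demo_sample = torch.randn(1, 3, 8, 8)
    for t in demo_scheduler.timesteps:
        model_output = torch.randn_like(demo_sample)  # stand-in for a UNet prediction
        demo_sample = demo_scheduler.step(model_output, t, demo_sample).prev_sample
    print(demo_sample.shape)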
| 147
| 0
|
"""simple docstring"""
import base64


def base85_encode(string: str) -> bytes:
    '''simple docstring'''
    return base64.a85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    '''simple docstring'''
    return base64.a85decode(a85encoded).decode("utf-8")
if __name__ == "__main__":
import doctest
doctest.testmod()
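    # A quick round-trip sketch using the helpers above (hedged: the function
    # names were normalized in this cleanup):
    assert base85_decode(base85_encode("Hello World!")) == "Hello World!"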
| 57
|
"""simple docstring"""
from pathlib import Path
import fire
from tqdm import tqdm
def download_wmt_dataset(src_lang: str = "ro", tgt_lang: str = "en", dataset: str = "wmt16", save_dir: str = None) -> None:
    """Download a dataset using the datasets package and save it to the format expected by finetune.py."""
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets")
    pair = f"{src_lang}-{tgt_lang}"
    print(f"Converting {dataset}-{pair}")
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f"{dataset}-{pair}"
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)

    for split in ds.keys():
        print(f"Splitting {split} with {ds[split].num_rows} records")

        # to save to val.source, val.target like summary datasets
        fn = "val" if split == "validation" else split

        src_path = save_dir.joinpath(f"{fn}.source")
        tgt_path = save_dir.joinpath(f"{fn}.target")
        src_fp = src_path.open("w+")
        tgt_fp = tgt_path.open("w+")

        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split]):
            ex = x["translation"]
            src_fp.write(ex[src_lang] + "\n")
            tgt_fp.write(ex[tgt_lang] + "\n")

    print(f"Saved {dataset} dataset to {save_dir}")
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
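# A hedged invocation sketch (the script file name is hypothetical; `fire`
# exposes the function's keyword arguments as CLI flags):
#
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en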
| 150
| 0
|
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate the waiting time of each process (shortest remaining time first)."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999999999
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time
def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Calculate the turn-around time of each process."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
def calculate_average_times(waiting_time: list[int], turn_around_time: list[int], no_of_processes: int) -> None:
    """Print the average waiting and turn-around times."""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print("Average turn around time =", total_turn_around_time / no_of_processes)
if __name__ == "__main__":
    print("Enter how many process you want to analyze")
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))

    for i in range(no_of_processes):
        print("Enter the arrival time and burst time for process:--" + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())

    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)

    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)

    calculate_average_times(waiting_time, turn_around_time, no_of_processes)

    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            "Process",
            "BurstTime",
            "ArrivalTime",
            "WaitingTime",
            "TurnAroundTime",
        ],
    )
# Printing the dataFrame
pd.set_option('''display.max_rows''', fcfs.shape[0] + 1)
print(fcfs)
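    # A non-interactive sketch of the same pipeline with fixed inputs (hedged:
    # the expected values were traced by hand from the SRTF rules above, so
    # re-check them before relying on this):
    #   arrival = [0, 1, 2], burst = [3, 1, 2]
    #   P2 preempts P1 at t=1 and finishes at t=2 (waiting time 0);
    #   P1 resumes and finishes at t=4 (waiting time 1);
    #   P3 runs t=4..6 (waiting time 2), so the average waiting time is 1.0.
    #
    #   wt = calculate_waitingtime([0, 1, 2], [3, 1, 2], 3)  # expected [1, 0, 2]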
| 204
|
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
logger = logging.get_logger(__name__)
class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)

        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000

        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1
    def _extract_mfsc_features(self, one_waveform: np.ndarray) -> np.ndarray:
        """Extracts MFSC features for one waveform vector (unbatched)."""
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)

        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.feature_size,
            min_frequency=0.0,
            max_frequency=self.sampling_rate / 2.0,
            sampling_rate=self.sampling_rate,
        )

        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale,
            window=window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            center=False,
            preemphasis=self.preemphasis_coeff,
            mel_filters=fbanks,
            mel_floor=self.mel_floor,
            log_mel="log",
        )
        return msfc_features.T
    def _normalize_one(self, x, input_length, padding_value):
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None):
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ):
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=True,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
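# A hedged usage sketch (assumes the class name chosen in this cleanup; the
# extractor expects 16 kHz mono waveforms), guarded so it only runs when the
# file is executed directly:
if __name__ == "__main__":
    extractor = MCTCTFeatureExtractor()
    waveform = np.zeros(16000, dtype=np.float32)  # one second of silence
    features = extractor(waveform, sampling_rate=16000, return_tensors="np")
    print(features["input_features"].shape)  # (batch, num_frames, feature_size)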
| 204
| 1
|
'''simple docstring'''
def solution() -> int:
    '''
    Returns the number of Sundays that fell on the first of the month during
    the twentieth century (1 Jan 1901 to 31 Dec 2000).
    '''
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901

    sundays = 0

    while year < 2001:
        day += 7

        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1
    return sundays
if __name__ == "__main__":
print(solution())
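    # A hedged cross-check using the standard library (weekday() == 6 is Sunday):
    import datetime

    assert solution() == sum(
        1 for y in range(1901, 2001) for m in range(1, 13) if datetime.date(y, m, 1).weekday() == 6
    )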
| 56
|
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_configure(config):
config.addinivalue_line(
'''markers''', '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' )
config.addinivalue_line(
'''markers''', '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' )
config.addinivalue_line('''markers''', '''is_pipeline_test: mark test to run only when pipelines are tested''' )
config.addinivalue_line('''markers''', '''is_staging_test: mark test to run only in the staging environment''' )
config.addinivalue_line('''markers''', '''accelerate_tests: mark test that require accelerate''' )
config.addinivalue_line('''markers''', '''tool_tests: mark the tool tests that are run on their specific schedule''' )
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('--make-reports')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5; report success instead.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag('IGNORE_RESULT')

OutputChecker = doctest.OutputChecker
class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
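# A hedged illustration of the custom flag registered above: a doctest can opt
# out of output comparison on hardware-dependent lines (hypothetical snippet,
# not part of this conftest):
#
#   >>> torch.cuda.device_count()  # doctest: +IGNORE_RESULT
#   4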
| 56
| 1
|
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"
def clean_doc_toc(doc_list):
    """Cleans a table-of-content section by removing duplicates and sorting entries alphabetically."""
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1

        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)

    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError("{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc
def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
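# A hedged usage note: in the diffusers repo a checker like this typically
# lives under `utils/` and runs as part of the style checks, e.g.
#
#   python utils/check_doc_toc.py --fix_and_overwrite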
| 218
|
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
TEST_UNET_CONFIG = {
'''sample_size''': 32,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': 1000,
'''block_out_channels''': [32, 64],
'''attention_head_dim''': 8,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
IMAGENET_64_UNET_CONFIG = {
'''sample_size''': 64,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 3,
'''num_class_embeds''': 1000,
'''block_out_channels''': [192, 192 * 2, 192 * 3, 192 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
LSUN_256_UNET_CONFIG = {
'''sample_size''': 256,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': None,
'''block_out_channels''': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''default''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
CD_KWARGS = {
'''num_train_timesteps''': 40,
'''sigma_min''': 0.0_0_2,
'''sigma_max''': 8_0.0,
}
CT_IMAGENET_64_KWARGS = {
'''num_train_timesteps''': 201,
'''sigma_min''': 0.0_0_2,
'''sigma_max''': 8_0.0,
}
CT_LSUN_256_KWARGS = {
'''num_train_timesteps''': 151,
'''sigma_min''': 0.0_0_2,
'''sigma_max''': 8_0.0,
}
def str2bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]

    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]

    return new_checkpoint
def convert_attention( checkpoint , new_checkpoint , old_prefix , new_prefix , attention_head_dim=None ):
    '''simple docstring'''
    weight_q, weight_k, weight_v = checkpoint[F'''{old_prefix}.qkv.weight'''].chunk(3 , dim=0 )
    bias_q, bias_k, bias_v = checkpoint[F'''{old_prefix}.qkv.bias'''].chunk(3 , dim=0 )
    new_checkpoint[F'''{new_prefix}.group_norm.weight'''] = checkpoint[F'''{old_prefix}.norm.weight''']
    new_checkpoint[F'''{new_prefix}.group_norm.bias'''] = checkpoint[F'''{old_prefix}.norm.bias''']
    new_checkpoint[F'''{new_prefix}.to_q.weight'''] = weight_q.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'''{new_prefix}.to_q.bias'''] = bias_q.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'''{new_prefix}.to_k.weight'''] = weight_k.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'''{new_prefix}.to_k.bias'''] = bias_k.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'''{new_prefix}.to_v.weight'''] = weight_v.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'''{new_prefix}.to_v.bias'''] = bias_v.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'''{new_prefix}.to_out.0.weight'''] = (
        checkpoint[F'''{old_prefix}.proj_out.weight'''].squeeze(-1 ).squeeze(-1 )
    )
    new_checkpoint[F'''{new_prefix}.to_out.0.bias'''] = checkpoint[F'''{old_prefix}.proj_out.bias'''].squeeze(-1 ).squeeze(-1 )
    return new_checkpoint
def con_pt_to_diffuser( checkpoint_path , unet_config ):
    '''simple docstring'''
    checkpoint = torch.load(checkpoint_path , map_location='''cpu''' )
    new_checkpoint = {}
    new_checkpoint['''time_embedding.linear_1.weight'''] = checkpoint['''time_embed.0.weight''']
    new_checkpoint['''time_embedding.linear_1.bias'''] = checkpoint['''time_embed.0.bias''']
    new_checkpoint['''time_embedding.linear_2.weight'''] = checkpoint['''time_embed.2.weight''']
    new_checkpoint['''time_embedding.linear_2.bias'''] = checkpoint['''time_embed.2.bias''']
    if unet_config["num_class_embeds"] is not None:
        new_checkpoint['''class_embedding.weight'''] = checkpoint['''label_emb.weight''']
    new_checkpoint['''conv_in.weight'''] = checkpoint['''input_blocks.0.0.weight''']
    new_checkpoint['''conv_in.bias'''] = checkpoint['''input_blocks.0.0.bias''']
    down_block_types = unet_config['''down_block_types''']
    layers_per_block = unet_config['''layers_per_block''']
    attention_head_dim = unet_config['''attention_head_dim''']
    channels_list = unet_config['''block_out_channels''']
    current_layer = 1
    prev_channels = channels_list[0]
    for i, layer_type in enumerate(down_block_types ):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block ):
                new_prefix = F'''down_blocks.{i}.resnets.{j}'''
                old_prefix = F'''input_blocks.{current_layer}.0'''
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=has_skip )
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block ):
                new_prefix = F'''down_blocks.{i}.resnets.{j}'''
                old_prefix = F'''input_blocks.{current_layer}.0'''
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=has_skip )
                new_prefix = F'''down_blocks.{i}.attentions.{j}'''
                old_prefix = F'''input_blocks.{current_layer}.1'''
                new_checkpoint = convert_attention(
                    checkpoint , new_checkpoint , old_prefix , new_prefix , attention_head_dim )
                current_layer += 1
        if i != len(down_block_types ) - 1:
            new_prefix = F'''down_blocks.{i}.downsamplers.0'''
            old_prefix = F'''input_blocks.{current_layer}.0'''
            new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
            current_layer += 1
        prev_channels = current_channels
    # hardcoded the mid-block for now
    new_prefix = '''mid_block.resnets.0'''
    old_prefix = '''middle_block.0'''
    new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
    new_prefix = '''mid_block.attentions.0'''
    old_prefix = '''middle_block.1'''
    new_checkpoint = convert_attention(checkpoint , new_checkpoint , old_prefix , new_prefix , attention_head_dim )
    new_prefix = '''mid_block.resnets.1'''
    old_prefix = '''middle_block.2'''
    new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
    current_layer = 0
    up_block_types = unet_config['''up_block_types''']
    for i, layer_type in enumerate(up_block_types ):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1 ):
                new_prefix = F'''up_blocks.{i}.resnets.{j}'''
                old_prefix = F'''output_blocks.{current_layer}.0'''
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=True )
                current_layer += 1
            if i != len(up_block_types ) - 1:
                new_prefix = F'''up_blocks.{i}.upsamplers.0'''
                old_prefix = F'''output_blocks.{current_layer-1}.1'''
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1 ):
                new_prefix = F'''up_blocks.{i}.resnets.{j}'''
                old_prefix = F'''output_blocks.{current_layer}.0'''
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=True )
                new_prefix = F'''up_blocks.{i}.attentions.{j}'''
                old_prefix = F'''output_blocks.{current_layer}.1'''
                new_checkpoint = convert_attention(
                    checkpoint , new_checkpoint , old_prefix , new_prefix , attention_head_dim )
                current_layer += 1
            if i != len(up_block_types ) - 1:
                new_prefix = F'''up_blocks.{i}.upsamplers.0'''
                old_prefix = F'''output_blocks.{current_layer-1}.2'''
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
    new_checkpoint['''conv_norm_out.weight'''] = checkpoint['''out.0.weight''']
    new_checkpoint['''conv_norm_out.bias'''] = checkpoint['''out.0.bias''']
    new_checkpoint['''conv_out.weight'''] = checkpoint['''out.2.weight''']
    new_checkpoint['''conv_out.bias'''] = checkpoint['''out.2.bias''']
    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''')
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.'''
)
parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''')
    args = parser.parse_args()
    args.class_cond = strabool(args.class_cond)
    ckpt_name = os.path.basename(args.unet_path)
print(F"Checkpoint: {ckpt_name}")
# Get U-Net config
if "imagenet64" in ckpt_name:
_SCREAMING_SNAKE_CASE : Optional[Any] = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
_SCREAMING_SNAKE_CASE : int = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
_SCREAMING_SNAKE_CASE : Union[str, Any] = TEST_UNET_CONFIG
else:
raise ValueError(F"Checkpoint type {ckpt_name} is not currently supported.")
if not args.class_cond:
_SCREAMING_SNAKE_CASE : Union[str, Any] = None
_SCREAMING_SNAKE_CASE : int = con_pt_to_diffuser(args.unet_path, unet_config)
_SCREAMING_SNAKE_CASE : Optional[int] = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
_SCREAMING_SNAKE_CASE : Optional[Any] = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
_SCREAMING_SNAKE_CASE : Any = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
_SCREAMING_SNAKE_CASE : int = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F"Checkpoint type {ckpt_name} is not currently supported.")
_SCREAMING_SNAKE_CASE : int = CMStochasticIterativeScheduler(**scheduler_config)
_SCREAMING_SNAKE_CASE : str = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
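# A minimal sketch of single-step sampling with the converted checkpoint
# (assuming the conversion above succeeded; `args.dump_path` as saved above):
#
#   from diffusers import ConsistencyModelPipeline
#
#   pipe = ConsistencyModelPipeline.from_pretrained(args.dump_path)
#   image = pipe(num_inference_steps=1).images[0]
#   image.save("consistency_sample.png")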
| 218
| 1
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt")
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader( path: str ):
    '''simple docstring'''
    with open(path , '''rb''' ) as f:
        im = Image.open(f )
        return im.convert('''RGB''' )
@dataclass
class lowercase :
lowercase_ : Optional[str] =field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'''help''': '''Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'''
} , )
lowercase_ : Optional[str] =field(
default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
lowercase_ : Optional[str] =field(default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''A folder containing the training data.'''} )
lowercase_ : Optional[str] =field(default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''A folder containing the validation data.'''} )
lowercase_ : Optional[float] =field(
default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} )
lowercase_ : Optional[int] =field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
lowercase_ : Optional[int] =field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
def A__ ( self):
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
'''You must specify either a dataset name from the hub or a train and/or validation directory.''')
@dataclass
class lowercase :
lowercase_ : str =field(
default='''google/vit-base-patch16-224-in21k''' , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} , )
lowercase_ : Optional[str] =field(
default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(SCREAMING_SNAKE_CASE__ )} , )
lowercase_ : Optional[str] =field(
default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
lowercase_ : Optional[str] =field(
default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''} )
lowercase_ : str =field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
lowercase_ : str =field(default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''Name or path of preprocessor config.'''} )
lowercase_ : bool =field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
lowercase_ : bool =field(
default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , )
def collate_fn( examples ):
    '''simple docstring'''
    pixel_values = torch.stack([example['''pixel_values'''] for example in examples] )
    labels = torch.tensor([example['''labels'''] for example in examples] )
    return {"pixel_values": pixel_values, "labels": labels}
def main( ):
'''simple docstring'''
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('''run_image_classification''' , model_args , data_args )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
lowercase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
lowercase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task='''image-classification''' , use_auth_token=True if model_args.use_auth_token else None , )
else:
lowercase = {}
if data_args.train_dir is not None:
lowercase = os.path.join(data_args.train_dir , '''**''' )
if data_args.validation_dir is not None:
lowercase = os.path.join(data_args.validation_dir , '''**''' )
lowercase = load_dataset(
'''imagefolder''' , data_files=lowerCAmelCase__ , cache_dir=model_args.cache_dir , task='''image-classification''' , )
# If we don't have a validation split, split off a percentage of train as validation.
lowercase = None if '''validation''' in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , lowerCAmelCase__ ) and data_args.train_val_split > 0.0:
lowercase = dataset['''train'''].train_test_split(data_args.train_val_split )
lowercase = split['''train''']
lowercase = split['''test''']
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
lowercase = dataset['''train'''].features['''labels'''].names
lowercase , lowercase = {}, {}
for i, label in enumerate(lowerCAmelCase__ ):
lowercase = str(lowerCAmelCase__ )
lowercase = label
# Load the accuracy metric from the datasets package
    metric = evaluate.load('''accuracy''' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p ):
        return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )
lowercase = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(lowerCAmelCase__ ) , labelaid=lowerCAmelCase__ , idalabel=lowerCAmelCase__ , finetuning_task='''image-classification''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowercase = AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=lowerCAmelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
lowercase = AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
lowercase = image_processor.size['''shortest_edge''']
else:
lowercase = (image_processor.size['''height'''], image_processor.size['''width'''])
lowercase = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
lowercase = Compose(
[
RandomResizedCrop(lowerCAmelCase__ ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
lowercase = Compose(
[
Resize(lowerCAmelCase__ ),
CenterCrop(lowerCAmelCase__ ),
ToTensor(),
normalize,
] )
def train_transforms(lowerCAmelCase__ ):
lowercase = [
_train_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch['''image''']
]
return example_batch
def val_transforms(lowerCAmelCase__ ):
lowercase = [_val_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch['''image''']]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError('''--do_train requires a train dataset''' )
if data_args.max_train_samples is not None:
lowercase = (
dataset['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(lowerCAmelCase__ )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError('''--do_eval requires a validation dataset''' )
if data_args.max_eval_samples is not None:
lowercase = (
dataset['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
dataset["validation"].set_transform(lowerCAmelCase__ )
    # Initialize our trainer
lowercase = Trainer(
model=lowerCAmelCase__ , args=lowerCAmelCase__ , train_dataset=dataset['''train'''] if training_args.do_train else None , eval_dataset=dataset['''validation'''] if training_args.do_eval else None , compute_metrics=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , data_collator=lowerCAmelCase__ , )
# Training
if training_args.do_train:
lowercase = None
if training_args.resume_from_checkpoint is not None:
lowercase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowercase = last_checkpoint
lowercase = trainer.train(resume_from_checkpoint=lowerCAmelCase__ )
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
lowercase = trainer.evaluate()
trainer.log_metrics('''eval''' , lowerCAmelCase__ )
trainer.save_metrics('''eval''' , lowerCAmelCase__ )
# Write model card and (optionally) push to hub
lowercase = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''image-classification''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''image-classification''', '''vision'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCAmelCase__ )
else:
trainer.create_model_card(**lowerCAmelCase__ )
if __name__ == "__main__":
main()
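# A minimal sketch of a typical invocation (dataset and checkpoint names are
# illustrative; any image-classification dataset and backbone work):
#
#   python run_image_classification.py \
#       --dataset_name beans \
#       --model_name_or_path google/vit-base-patch16-224-in21k \
#       --output_dir ./vit-finetuned \
#       --remove_unused_columns False \
#       --do_train --do_eval \
#       --per_device_train_batch_size 16 \
#       --num_train_epochs 3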
| 101
|
from math import ceil
def solution( n : int = 1_001 ) -> int:
    """simple docstring"""
    total = 1
    for i in range(1 ,int(ceil(n / 2.0 ) ) ):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
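# Worked example: in a 5x5 number spiral the diagonals hold
# 1, 3, 5, 7, 9, 13, 17, 21, 25, which sum to 101, so solution(5) == 101.
# Each ring i contributes corners summing to 4 * (2i + 1)**2 - 6 * (2i),
# which is exactly the loop body above.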
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
        n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number''')
| 235
| 0
|
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
lowerCamelCase : int = logging.getLogger()
lowerCamelCase : Tuple = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class A( UpperCamelCase ):
'''simple docstring'''
def a__ ( self : List[str] , A_ : Optional[Any] ) -> int:
"""simple docstring"""
os.makedirs(A_ , exist_ok=A_ )
lowerCamelCase_ = {'source': 'What is love ?', 'target': 'life'}
lowerCamelCase_ = {'train': 12, 'val': 2, 'test': 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
lowerCamelCase_ = '\n'.join([contents[field]] * n_lines[split] )
with open(os.path.join(A_ , f"""{split}.{field}""" ) , 'w' ) as f:
f.write(A_ )
def a__ ( self : Optional[Any] , A_ : int , A_ : str = "pytorch" ) -> Any:
"""simple docstring"""
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
lowerCamelCase_ = os.path.join(A_ , 'output' )
lowerCamelCase_ = os.path.join(A_ , 'data' )
self._create_dummy_data(data_dir=A_ )
lowerCamelCase_ = f"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(f"""--gpus={gpus}""" )
if is_apex_available():
testargs.append('--fp16' )
else:
testargs.append('--gpus=0' )
testargs.append('--distributed_backend=ddp_cpu' )
testargs.append('--num_processes=2' )
lowerCamelCase_ = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(A_ , env=self.get_env() )
lowerCamelCase_ = os.path.join(A_ , 'metrics.json' )
with open(A_ ) as f:
lowerCamelCase_ = json.load(A_ )
return result
@require_torch_gpu
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
lowerCamelCase_ = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_multi_gpu
def a__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_gpu
@require_ray
def a__ ( self : Tuple ) -> int:
"""simple docstring"""
lowerCamelCase_ = self._run_finetune(gpus=1 , distributed_retriever='ray' )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_multi_gpu
@require_ray
def a__ ( self : Dict ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = self._run_finetune(gpus=1 , distributed_retriever='ray' )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
| 354
|
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
lowerCamelCase : List[Any] = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
lowerCamelCase : List[Any] = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
lowerCamelCase : Optional[Any] = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
lowerCamelCase : List[Any] = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes.
lowerCamelCase : Union[str, Any] = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
lowerCamelCase : Tuple = [
("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"),
("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"),
("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"),
("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"),
("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"),
("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"),
("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"),
("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"),
("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"),
(
"zero-shot-object-detection",
"MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES",
"AutoModelForZeroShotObjectDetection",
),
("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"),
("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"),
("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"),
("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"),
(
"table-question-answering",
"MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForTableQuestionAnswering",
),
("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"),
("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"),
(
"next-sentence-prediction",
"MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES",
"AutoModelForNextSentencePrediction",
),
(
"audio-frame-classification",
"MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForAudioFrameClassification",
),
("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"),
(
"document-question-answering",
"MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForDocumentQuestionAnswering",
),
(
"visual-question-answering",
"MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForVisualQuestionAnswering",
),
("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"),
(
"zero-shot-image-classification",
"MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForZeroShotImageClassification",
),
("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"),
("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"),
("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"),
]
def camel_case_split( lowercase : int ):
'''simple docstring'''
    matches = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)' , lowercase )
    return [m.group(0 ) for m in matches]
def get_frameworks_table( ):
    '''simple docstring'''
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace('Config' , '' ): model_type for model_type, config in config_maping_names.items()
    }
    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool )
    tf_models = collections.defaultdict(bool )
    flax_models = collections.defaultdict(bool )
    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module ):
        lookup_dict = None
        if _re_tf_models.match(attr_name ) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name ).groups()[0]
        elif _re_flax_models.match(attr_name ) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name ).groups()[0]
        elif _re_pt_models.match(attr_name ) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name ).groups()[0]
        if lookup_dict is not None:
            while len(attr_name ) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = ''.join(camel_case_split(attr_name )[:-1] )
    all_models = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
    all_models = list(all_models )
    all_models.sort()
    data = {'model_type': all_models}
    data['pytorch'] = [pt_models[t] for t in all_models]
    data['tensorflow'] = [tf_models[t] for t in all_models]
    data['flax'] = [flax_models[t] for t in all_models]
    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = 'AutoProcessor'
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = 'AutoTokenizer'
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = 'AutoFeatureExtractor'
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = 'AutoTokenizer'
    data['processor'] = [processors[t] for t in all_models]
    return pd.DataFrame(data )
def update_pipeline_and_auto_class_table( table : str ):
    '''simple docstring'''
    modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f"""TF_{model_mapping}""", f"""FLAX_{model_mapping}"""]
        auto_classes = [auto_class, f"""TF_{auto_class}""", f"""Flax_{auto_class}"""]
        # Loop through all three frameworks
        for module, cls, mapping in zip(modules , auto_classes , model_mappings ):
            # The type of pipeline may not exist in this framework
            if not hasattr(module , mapping ):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module , mapping ).values():
                if isinstance(name , str ):
                    model_names.append(name )
                else:
                    model_names.extend(list(name ) )
            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
    return table
def update_metadata( token : str , commit_sha : Optional[int] ):
    '''simple docstring'''
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table )
    resolved_tags_file = hf_hub_download(
        'huggingface/transformers-metadata' , 'pipeline_tags.json' , repo_type='dataset' , token=token )
    tags_dataset = Dataset.from_json(resolved_tags_file )
    table = {
        tags_dataset[i]['model_class']: (tags_dataset[i]['pipeline_tag'], tags_dataset[i]['auto_class'])
        for i in range(len(tags_dataset ) )
    }
    table = update_pipeline_and_auto_class_table(table )
    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys() )
    tags_table = pd.DataFrame(
        {
            'model_class': model_classes,
            'pipeline_tag': [table[m][0] for m in model_classes],
            'auto_class': [table[m][1] for m in model_classes],
        } )
    tags_dataset = Dataset.from_pandas(tags_table )
    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir , 'frameworks.json' ) )
        tags_dataset.to_json(os.path.join(tmp_dir , 'pipeline_tags.json' ) )
        if commit_sha is not None:
            commit_message = (
                f"""Update with commit {commit_sha}\n\nSee: """
                f"""https://github.com/huggingface/transformers/commit/{commit_sha}"""
            )
        else:
            commit_message = 'Update'
        upload_folder(
            repo_id='huggingface/transformers-metadata' , folder_path=tmp_dir , repo_type='dataset' , token=token , commit_message=commit_message , )
def check_pipeline_tags( ):
    '''simple docstring'''
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]['pt']
            if isinstance(model , (list, tuple) ):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(model )
    if len(missing ) > 0:
        msg = ', '.join(missing )
        raise ValueError(
            'The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '
            f"""`utils/update_metadata.py`: {msg}. Please add them!""" )
if __name__ == "__main__":
lowerCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
lowerCamelCase : Optional[int] = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
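# A minimal sketch of the two ways this script is run (a Hub token with write
# access to huggingface/transformers-metadata is assumed for the update):
#
#   python utils/update_metadata.py --check-only
#   python utils/update_metadata.py --token <hub_token> --commit_sha <sha>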
| 208
| 0
|
'''simple docstring'''
import numpy as np
def lowerCamelCase (vector : np.array ):
    return 1 / (1 + np.exp(-vector ))
if __name__ == "__main__":
import doctest
doctest.testmod()
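# Example (values rounded): the logistic function squashes any real input
# into the open interval (0, 1):
#
#   lowerCamelCase(np.array([-1.0, 0.0, 1.0]))
#   # -> array([0.26894142, 0.5, 0.73105858])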
| 27
|
import random
def rabin_miller (num: int ) -> bool:
    """simple docstring"""
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5 ):
        a = random.randrange(2 , num - 1 )
        v = pow(a , s , num )
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
def is_prime_low_num (num: int ) -> bool:
    """simple docstring"""
    if num < 2:
        return False
    low_primes = [
2,
3,
5,
7,
1_1,
1_3,
1_7,
1_9,
2_3,
2_9,
3_1,
3_7,
4_1,
4_3,
4_7,
5_3,
5_9,
6_1,
6_7,
7_1,
7_3,
7_9,
8_3,
8_9,
9_7,
1_0_1,
1_0_3,
1_0_7,
1_0_9,
1_1_3,
1_2_7,
1_3_1,
1_3_7,
1_3_9,
1_4_9,
1_5_1,
1_5_7,
1_6_3,
1_6_7,
1_7_3,
1_7_9,
1_8_1,
1_9_1,
1_9_3,
1_9_7,
1_9_9,
2_1_1,
2_2_3,
2_2_7,
2_2_9,
2_3_3,
2_3_9,
2_4_1,
2_5_1,
2_5_7,
2_6_3,
2_6_9,
2_7_1,
2_7_7,
2_8_1,
2_8_3,
2_9_3,
3_0_7,
3_1_1,
3_1_3,
3_1_7,
3_3_1,
3_3_7,
3_4_7,
3_4_9,
3_5_3,
3_5_9,
3_6_7,
3_7_3,
3_7_9,
3_8_3,
3_8_9,
3_9_7,
4_0_1,
4_0_9,
4_1_9,
4_2_1,
4_3_1,
4_3_3,
4_3_9,
4_4_3,
4_4_9,
4_5_7,
4_6_1,
4_6_3,
4_6_7,
4_7_9,
4_8_7,
4_9_1,
4_9_9,
5_0_3,
5_0_9,
5_2_1,
5_2_3,
5_4_1,
5_4_7,
5_5_7,
5_6_3,
5_6_9,
5_7_1,
5_7_7,
5_8_7,
5_9_3,
5_9_9,
6_0_1,
6_0_7,
6_1_3,
6_1_7,
6_1_9,
6_3_1,
6_4_1,
6_4_3,
6_4_7,
6_5_3,
6_5_9,
6_6_1,
6_7_3,
6_7_7,
6_8_3,
6_9_1,
7_0_1,
7_0_9,
7_1_9,
7_2_7,
7_3_3,
7_3_9,
7_4_3,
7_5_1,
7_5_7,
7_6_1,
7_6_9,
7_7_3,
7_8_7,
7_9_7,
8_0_9,
8_1_1,
8_2_1,
8_2_3,
8_2_7,
8_2_9,
8_3_9,
8_5_3,
8_5_7,
8_5_9,
8_6_3,
8_7_7,
8_8_1,
8_8_3,
8_8_7,
9_0_7,
9_1_1,
9_1_9,
9_2_9,
9_3_7,
9_4_1,
9_4_7,
9_5_3,
9_6_7,
9_7_1,
9_7_7,
9_8_3,
9_9_1,
9_9_7,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
    return rabin_miller(num )
def generate_large_prime (keysize: int = 1_0_2_4 ) -> int:
    """simple docstring"""
    while True:
        num = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
        if is_prime_low_num(num ):
            return num
if __name__ == "__main__":
    num = generate_large_prime()
    print(('Prime number:', num))
    print(('is_prime_low_num:', is_prime_low_num(num)))
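# Example (illustrative): rabin_miller is probabilistic, but five random
# bases make a wrong answer extremely unlikely:
#
#   rabin_miller(97)          # True  - 97 is prime
#   rabin_miller(561)         # False - Carmichael number, caught by Miller-Rabin
#   is_prime_low_num(7_919)   # True  - the 1000th prime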
| 147
| 0
|
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption( parser ):
    '''simple docstring'''
    from diffusers.utils.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary( terminalreporter ):
    '''simple docstring'''
    from diffusers.utils.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption('''--make-reports''' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
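# Example: with this conftest on the test path, per-run reports can be
# requested via the option registered by `pytest_addoption_shared`:
#
#   pytest tests/ --make-reports=my_ci_run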
| 361
|
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
snake_case__ : List[str] = logging.get_logger(__name__)
snake_case__ : Dict = {'vocab_file': 'vocab.txt'}
snake_case__ : Dict = {
'vocab_file': {
'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
},
}
snake_case__ : Optional[int] = {
'openbmb/cpm-ant-10b': 1024,
}
def _a ( lowerCamelCase: List[Any] ) -> Union[str, Any]:
'''simple docstring'''
__A = collections.OrderedDict()
with open(lowerCamelCase , '''r''' , encoding='''utf-8''' ) as reader:
__A = reader.readlines()
for index, token in enumerate(lowerCamelCase ):
__A = token.rstrip('''\n''' )
__A = index
return vocab
class A_ ( _lowerCamelCase ):
    def __init__(self , vocab , unk_token="<unk>" , max_input_chars_per_word=200 ):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word
    def tokenize(self , token ):
        chars = list(token )
        if len(chars ) > self.max_input_chars_per_word:
            return [self.unk_token]
        start = 0
        sub_tokens = []
        while start < len(chars ):
            end = len(chars )
            cur_substr = None
            while start < end:
                substr = ''''''.join(chars[start:end] )
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token )
                start += 1
            else:
                sub_tokens.append(cur_substr )
                start = end
        return sub_tokens
class A_ ( _lowerCamelCase ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ["""input_ids""", """attention_mask"""]
lowerCAmelCase__ = False
    def __init__(self , vocab_file , bod_token="<d>" , eod_token="</d>" , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , unk_token="<unk>" , line_token="</n>" , space_token="</_>" , padding_side="left" , **kwargs , ):
        requires_backends(self , ['''jieba'''] )
        super().__init__(
            bod_token=bod_token , eod_token=eod_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , unk_token=unk_token , line_token=line_token , space_token=space_token , padding_side=padding_side , **kwargs , )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file )
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def _lowerCAmelCase (self :Union[str, Any] )-> Dict:
return self.encoder[self.bod_token]
@property
def _lowerCAmelCase (self :Optional[int] )-> Dict:
return self.encoder[self.eod_token]
@property
def _lowerCAmelCase (self :Any )-> List[Any]:
return self.encoder["\n"]
@property
def _lowerCAmelCase (self :List[str] )-> int:
return len(self.encoder )
def _lowerCAmelCase (self :List[str] )-> List[Any]:
return dict(self.encoder , **self.added_tokens_encoder )
    def _tokenize(self , text ):
        output_tokens = []
        for x in jieba.cut(text , cut_all=False ):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x ) )
        return output_tokens
    def _decode(self , token_ids , **kwargs ):
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids , **kwargs )
    def _lowerCAmelCase (self :Tuple , token :Optional[int] )-> List[str]:
        return token in self.encoder
def _lowerCAmelCase (self :Union[str, Any] , _UpperCamelCase :List[str] )-> str:
return "".join(_UpperCamelCase )
def _lowerCAmelCase (self :Optional[int] , _UpperCamelCase :List[Any] )-> List[Any]:
return self.encoder.get(_UpperCamelCase , self.encoder.get(self.unk_token ) )
def _lowerCAmelCase (self :Any , _UpperCamelCase :Tuple )-> int:
return self.decoder.get(_UpperCamelCase , self.unk_token )
    def save_vocabulary(self , save_directory: str , filename_prefix: Optional[str] = None )-> Tuple[str]:
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        else:
            vocab_file = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
        index = 0
if " " in self.encoder:
__A = self.encoder[''' ''']
del self.encoder[" "]
if "\n" in self.encoder:
__A = self.encoder['''\n''']
del self.encoder["\n"]
__A = collections.OrderedDict(sorted(self.encoder.items() , key=lambda _UpperCamelCase : x[1] ) )
with open(_UpperCamelCase , '''w''' , encoding='''utf-8''' ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
''' Please check that the vocabulary is not corrupted!''' )
                index = token_index
writer.write(token + '''\n''' )
index += 1
return (vocab_file,)
    def build_inputs_with_special_tokens(self , token_ids_0: List[int] , token_ids_1: List[int] = None )-> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1
    def get_special_tokens_mask(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False )-> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 ))
        return [1] + ([0] * len(token_ids_0 ))
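# A minimal sketch of the greedy longest-match loop implemented by
# WordpieceTokenizer.tokenize above, on a toy vocabulary (illustrative only):
#
#   toy = WordpieceTokenizer(vocab={"un": 0, "unhappi": 1, "ness": 2}, unk_token="<unk>")
#   toy.tokenize("unhappiness")  # -> ['unhappi', 'ness']
#   toy.tokenize("zzz")          # -> ['<unk>', '<unk>', '<unk>']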
| 250
| 0
|
def solution( numerator : int = 3 , denominator : int = 7 , limit : int = 1_00_00_00 ):
    '''simple docstring'''
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1 , limit + 1 ):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_000_000))
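# Worked example (from the problem statement): among fractions with
# denominator <= 8, the one immediately to the left of 3/7 is 2/5,
# so solution(3, 7, 8) == 2.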
| 204
|
from collections.abc import Callable
import numpy as np
def _SCREAMING_SNAKE_CASE ( ode_func : Callable , ya : float , xa : float , step_size : float , x_end : float ):
    '''simple docstring'''
    n = int(np.ceil((x_end - xa) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = ya
    x = xa
    for k in range(n ):
        y_pred = y[k] + step_size * ode_func(x , y[k] )
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x , y[k] ) + ode_func(x + step_size , y_pred ))
        )
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
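# Example: integrating y' = y from x=0 to x=1 with y(0)=1 and step 0.2.
# Heun's method is second order, so the endpoint lands near e ~ 2.718:
#
#   y = _SCREAMING_SNAKE_CASE(lambda x, y: y, 1.0, 0.0, 0.2, 1.0)
#   y[-1]  # ~2.70, versus ~2.49 for plain explicit Euler at the same step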
| 204
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mluke'] = ['MLukeTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 356
|
'''simple docstring'''
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
UpperCAmelCase = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def _snake_case ( _SCREAMING_SNAKE_CASE : Tuple ) -> List[Any]:
"""simple docstring"""
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def _snake_case ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : str ) -> Dict:
"""simple docstring"""
return max(metric_fn(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for gt in ground_truths )
def _snake_case ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowerCAmelCase = [line.strip() for line in open(_SCREAMING_SNAKE_CASE , """r""" ).readlines()]
lowerCAmelCase = []
if args.gold_data_mode == "qa":
lowerCAmelCase = pd.read_csv(_SCREAMING_SNAKE_CASE , sep="""\t""" , header=_SCREAMING_SNAKE_CASE )
for answer_list in data[1]:
lowerCAmelCase = ast.literal_eval(_SCREAMING_SNAKE_CASE )
answers.append(_SCREAMING_SNAKE_CASE )
else:
lowerCAmelCase = [line.strip() for line in open(_SCREAMING_SNAKE_CASE , """r""" ).readlines()]
lowerCAmelCase = [[reference] for reference in references]
lowerCAmelCase = lowerCAmelCase = lowerCAmelCase = 0
for prediction, ground_truths in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
total += 1
em += metric_max_over_ground_truths(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
fa += metric_max_over_ground_truths(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCAmelCase = 100.0 * em / total
lowerCAmelCase = 100.0 * fa / total
logger.info(f'F1: {fa:.2f}' )
logger.info(f'EM: {em:.2f}' )
def _snake_case ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Dict ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase = args.k
lowerCAmelCase = [line.strip() for line in open(_SCREAMING_SNAKE_CASE , """r""" ).readlines()]
lowerCAmelCase = [line.strip() for line in open(_SCREAMING_SNAKE_CASE , """r""" ).readlines()]
lowerCAmelCase = lowerCAmelCase = 0
for hypo, reference in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
lowerCAmelCase = set(hypo.split("""\t""" )[:k] )
lowerCAmelCase = set(reference.split("""\t""" ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
lowerCAmelCase = 100.0 * em / total
logger.info(f'Precision@{k}: {em: .2f}' )
def _snake_case ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[Any] ) -> Any:
"""simple docstring"""
def strip_title(_SCREAMING_SNAKE_CASE : Union[str, Any] ):
if title.startswith("""\"""" ):
lowerCAmelCase = title[1:]
if title.endswith("""\"""" ):
lowerCAmelCase = title[:-1]
return title
lowerCAmelCase = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
_SCREAMING_SNAKE_CASE , return_tensors="""pt""" , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , )["""input_ids"""].to(args.device )
lowerCAmelCase = rag_model.rag.question_encoder(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = question_enc_outputs[0]
lowerCAmelCase = rag_model.retriever(
_SCREAMING_SNAKE_CASE , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="""pt""" , )
lowerCAmelCase = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
lowerCAmelCase = []
for docs in all_docs:
lowerCAmelCase = [strip_title(_SCREAMING_SNAKE_CASE ) for title in docs["""title"""]]
provenance_strings.append("""\t""".join(_SCREAMING_SNAKE_CASE ) )
return provenance_strings
def _snake_case ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : str ) -> Tuple:
"""simple docstring"""
with torch.no_grad():
lowerCAmelCase = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
_SCREAMING_SNAKE_CASE , return_tensors="""pt""" , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE )
lowerCAmelCase = inputs_dict.input_ids.to(args.device )
lowerCAmelCase = inputs_dict.attention_mask.to(args.device )
lowerCAmelCase = rag_model.generate( # rag_model overwrites generate
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=_SCREAMING_SNAKE_CASE , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
lowerCAmelCase = rag_model.retriever.generator_tokenizer.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE )
if args.print_predictions:
for q, a in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
logger.info("""Q: {} - A: {}""".format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
return answers
def _snake_case ( ) -> Dict:
"""simple docstring"""
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"""--model_type""" , choices=["""rag_sequence""", """rag_token""", """bart"""] , type=_SCREAMING_SNAKE_CASE , help=(
"""RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"""
""" model_name_or_path"""
) , )
parser.add_argument(
"""--index_name""" , default=_SCREAMING_SNAKE_CASE , choices=["""exact""", """compressed""", """legacy"""] , type=_SCREAMING_SNAKE_CASE , help="""RAG model retriever type""" , )
parser.add_argument(
"""--index_path""" , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , help="""Path to the retrieval index""" , )
parser.add_argument("""--n_docs""" , default=5 , type=_SCREAMING_SNAKE_CASE , help="""Number of retrieved docs""" )
parser.add_argument(
"""--model_name_or_path""" , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help="""Path to pretrained checkpoints or model identifier from huggingface.co/models""" , )
parser.add_argument(
"""--eval_mode""" , choices=["""e2e""", """retrieval"""] , default="""e2e""" , type=_SCREAMING_SNAKE_CASE , help=(
"""Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"""
""" precision@k."""
) , )
parser.add_argument("""--k""" , default=1 , type=_SCREAMING_SNAKE_CASE , help="""k for the precision@k calculation""" )
parser.add_argument(
"""--evaluation_set""" , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help="""Path to a file containing evaluation samples""" , )
parser.add_argument(
"""--gold_data_path""" , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help="""Path to a tab-separated file with gold samples""" , )
parser.add_argument(
"""--gold_data_mode""" , default="""qa""" , type=_SCREAMING_SNAKE_CASE , choices=["""qa""", """ans"""] , help=(
"""Format of the gold data file"""
"""qa - a single line in the following format: question [tab] answer_list"""
"""ans - a single line of the gold file contains the expected answer string"""
) , )
parser.add_argument(
"""--predictions_path""" , type=_SCREAMING_SNAKE_CASE , default="""predictions.txt""" , help="""Name of the predictions file, to be stored in the checkpoints directory""" , )
parser.add_argument(
"""--eval_all_checkpoints""" , action="""store_true""" , help="""Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number""" , )
parser.add_argument(
"""--eval_batch_size""" , default=8 , type=_SCREAMING_SNAKE_CASE , help="""Batch size per GPU/CPU for evaluation.""" , )
parser.add_argument(
"""--recalculate""" , help="""Recalculate predictions even if the prediction file exists""" , action="""store_true""" , )
parser.add_argument(
"""--num_beams""" , default=4 , type=_SCREAMING_SNAKE_CASE , help="""Number of beams to be used when generating answers""" , )
parser.add_argument("""--min_length""" , default=1 , type=_SCREAMING_SNAKE_CASE , help="""Min length of the generated answers""" )
parser.add_argument("""--max_length""" , default=50 , type=_SCREAMING_SNAKE_CASE , help="""Max length of the generated answers""" )
parser.add_argument(
"""--print_predictions""" , action="""store_true""" , help="""If True, prints predictions while evaluating.""" , )
parser.add_argument(
"""--print_docs""" , action="""store_true""" , help="""If True, prints docs retried while generating.""" , )
lowerCAmelCase = parser.parse_args()
lowerCAmelCase = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
return args
def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )
    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
UpperCAmelCase = get_args()
main(args)
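# Illustrative invocation (a sketch, not from this file: the script name, model
# identifier, and data paths below are placeholder assumptions):
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-sequence-nq \
#       --evaluation_set data/questions.txt \
#       --gold_data_path data/gold.tsv \
#       --predictions_path predictions.txt \
#       --eval_mode e2e --gold_data_mode qa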
| 187
| 0
|
import qiskit
def quantum_entanglement(qubits: int = 2):
    """Build and sample an n-qubit GHZ (entangled) state."""
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend('aer_simulator')
    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)
    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)
    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))
    # Now measuring any one qubit would affect other qubits to collapse
    # their superposition and have the same state as the measured one.
    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(f'''Total count for various states are: {quantum_entanglement(3)}''')
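# Sanity check on the output (a sketch of what to expect, not measured output):
# a GHZ state collapses to all-zeros or all-ones together, so the returned
# counts should split roughly evenly between those two bitstrings, e.g. for
# 3 qubits and 1000 shots something like {'000': ~500, '111': ~500}.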
| 218
|
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use CLIPImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 218
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}


class MgpstrConfig(PretrainedConfig):
    model_type = 'mgp-str'

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
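# Minimal usage sketch (pairing this config with MgpstrForSceneTextRecognition
# is an assumption for illustration, not stated in this file):
#   config = MgpstrConfig(max_token_length=27)
#   from transformers import MgpstrForSceneTextRecognition
#   model = MgpstrForSceneTextRecognition(config)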
| 360
|
from __future__ import annotations
graph = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breadth_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f'No path from vertex: {self.source_vertex} to vertex: {target_vertex}'
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f'->{target_vertex}'


if __name__ == "__main__":
    g = Graph(graph, 'G')
    g.breadth_first_search()
    print(g.shortest_path('D'))
    print(g.shortest_path('G'))
    print(g.shortest_path('Foo'))
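# Expected behaviour for the adjacency list above with source vertex "G"
# (derived by hand from the BFS parent tree, for illustration):
#   g.shortest_path("D")   -> "G->C->A->B->D"
#   g.shortest_path("G")   -> "G"
#   g.shortest_path("Foo") -> raises ValueError (unreachable vertex)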
| 29
| 0
|
"""simple docstring"""
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)

            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)
        return img_augs
class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],
                value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]
        return torch.stack(images), torch.tensor(image_sizes)

    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them so all images in the batch share one shape
            images, sizes = self.pad(images)
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # scale factors between raw and resized sizes
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
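# Rough usage sketch (cfg is assumed to be this research project's frcnn Config
# object exposing the attributes read above; the image path is a placeholder):
#   preprocess = Preprocess(cfg)
#   images, sizes, scales_yx = preprocess("example.jpg", single_image=True)
#   boxes = _scale_box(pred_boxes, scales_yx.unsqueeze(0))  # map boxes back to raw-image scale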
| 108
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_dpt_config(checkpoint_url):
    config = DPTConfig()

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = 'huggingface/label-files'
        filename = 'ade20k-id2label.json'
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type='dataset')), 'r'))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace('pretrained.model', 'dpt.encoder')
    if "pretrained.model" in name:
        name = name.replace('pretrained.model', 'dpt.embeddings')
    if "patch_embed" in name:
        name = name.replace('patch_embed', 'patch_embeddings')
    if "pos_embed" in name:
        name = name.replace('pos_embed', 'position_embeddings')
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "proj" in name and "project" not in name:
        name = name.replace('proj', 'projection')
    if "blocks" in name:
        name = name.replace('blocks', 'layer')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after')
    if "scratch.output_conv" in name:
        name = name.replace('scratch.output_conv', 'head')
    if "scratch" in name:
        name = name.replace('scratch', 'neck')
    if "layer1_rn" in name:
        name = name.replace('layer1_rn', 'convs.0')
    if "layer2_rn" in name:
        name = name.replace('layer2_rn', 'convs.1')
    if "layer3_rn" in name:
        name = name.replace('layer3_rn', 'convs.2')
    if "layer4_rn" in name:
        name = name.replace('layer4_rn', 'convs.3')
    if "refinenet" in name:
        layer_idx = int(name[len('neck.refinenet') : len('neck.refinenet') + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(F'refinenet{layer_idx}', F'fusion_stage.layers.{abs(layer_idx-4)}')
    if "out_conv" in name:
        name = name.replace('out_conv', 'projection')
    if "resConfUnit1" in name:
        name = name.replace('resConfUnit1', 'residual_layer1')
    if "resConfUnit2" in name:
        name = name.replace('resConfUnit2', 'residual_layer2')
    if "conv1" in name:
        name = name.replace('conv1', 'convolution1')
    if "conv2" in name:
        name = name.replace('conv2', 'convolution2')
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess1.0.project.0', 'neck.reassemble_stage.readout_projects.0.0')
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess2.0.project.0', 'neck.reassemble_stage.readout_projects.1.0')
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess3.0.project.0', 'neck.reassemble_stage.readout_projects.2.0')
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess4.0.project.0', 'neck.reassemble_stage.readout_projects.3.0')
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace('pretrained.act_postprocess1.3', 'neck.reassemble_stage.layers.0.projection')
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace('pretrained.act_postprocess1.4', 'neck.reassemble_stage.layers.0.resize')
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace('pretrained.act_postprocess2.3', 'neck.reassemble_stage.layers.1.projection')
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace('pretrained.act_postprocess2.4', 'neck.reassemble_stage.layers.1.resize')
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace('pretrained.act_postprocess3.3', 'neck.reassemble_stage.layers.2.projection')
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace('pretrained.act_postprocess4.3', 'neck.reassemble_stage.layers.3.projection')
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace('pretrained.act_postprocess4.4', 'neck.reassemble_stage.layers.3.resize')
    if "pretrained" in name:
        name = name.replace('pretrained', 'dpt')
    if "bn" in name:
        name = name.replace('bn', 'batch_norm')
    if "head" in name:
        name = name.replace('head', 'head.head')
    if "encoder.norm" in name:
        name = name.replace('encoder.norm', 'layernorm')
    if "auxlayer" in name:
        name = name.replace('auxlayer', 'auxiliary_head.head')
    return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'dpt.encoder.layer.{i}.attn.qkv.weight')
        in_proj_bias = state_dict.pop(F'dpt.encoder.layer.{i}.attn.qkv.bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'dpt.encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[: config.hidden_size, :]
        state_dict[F'dpt.encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[F'dpt.encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'dpt.encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F'dpt.encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F'dpt.encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if 'ade' in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Check outputs on an image
    size = 480 if 'ade' in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors='pt')
    # forward pass
    outputs = model(**encoding).logits if 'ade' in checkpoint_url else model(**encoding).predicted_depth
    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1E-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(F'Saving model to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(F'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print('Pushing model to hub...')
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization='nielsr', commit_message='Add model', use_temp_dir=True,)
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization='nielsr', commit_message='Add image processor', use_temp_dir=True,)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
    args = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
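# Illustrative run (the script filename below is an assumption; the checkpoint
# URL is simply the default declared above):
#   python convert_dpt_to_pytorch.py \
#       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-large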
| 208
| 0
|
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float) -> tuple:
    """Given any two of voltage, current and power (the third passed as 0),
    compute the missing quantity from P = V * I."""
    result = namedtuple('result', 'name value')
    if (voltage, current, power).count(0) != 1:
        raise ValueError('Only one argument must be 0')
    elif power < 0:
        raise ValueError(
            'Power cannot be negative in any electrical/electronics system')
    elif voltage == 0:
        return result('voltage', power / current)
    elif current == 0:
        return result('current', power / voltage)
    elif power == 0:
        return result('power', float(round(abs(voltage * current), 2)))
    else:
        raise ValueError('Exactly one argument must be 0')
if __name__ == "__main__":
import doctest
doctest.testmod()
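# Worked examples (values chosen for illustration):
#   electric_power(voltage=0, current=2, power=5)  -> result(name='voltage', value=2.5)
#   electric_power(voltage=2, current=2, power=0)  -> result(name='power', value=4.0)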
| 353
|
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
open = open  # noqa: we just need to have a builtin inside this module to test it properly
| 301
| 0
|
'''simple docstring'''
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """Return the citation count scraped from a Google Scholar lookup page."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, '''html.parser''')
    div = soup.find('''div''', attrs={'''class''': '''gs_ri'''})
    anchors = div.find('''div''', attrs={'''class''': '''gs_fl'''}).find_all('''a''')
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 93
|
'''simple docstring'''
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = F'''Resistor at index {index} has a negative or zero value!'''
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = F'''Resistor at index {index} has a negative value!'''
            raise ValueError(msg)
        index += 1
    return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
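# Quick numeric check (illustrative): two 3.2-ohm resistors give
#   resistor_parallel([3.2, 3.2]) -> 1.6
#   resistor_series([3.2, 3.2])   -> 6.4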
| 250
| 0
|
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile('''[^A-Za-z_0-9]''')
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}


class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f'''Duplicate key {code_key}''')
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)


def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash, ThreadedIterator(dataset_iterator, max_queue_size=1_0000), chunksize=100, ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)


_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f, cluster_list, ), total=len(cluster_list), ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85) -> Tuple[Type[Dataset], List[List[Dict]]]:
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f'''Original dataset size: {len(dataset)}''')
    print(f'''Number of duplicate clusters: {len(duplicate_clusters)}''')
    print(f'''Files in duplicate cluster: {len(duplicate_indices)}''')
    print(f'''Unique files in duplicate cluster: {len(extreme_dict)}''')
    print(f'''Filtered dataset size: {len(ds_filter)}''')

    return ds_filter, duplicate_clusters
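# Typical entry point (a sketch; the dataset identifier below is an assumption
# used only for illustration, and loading it is outside this module):
#   from datasets import load_dataset
#   ds = load_dataset("codeparrot/codeparrot-clean", split="train")
#   ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)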
| 342
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"])
    config = UperNetConfig(
        backbone_config=backbone_config, auxiliary_in_channels=auxiliary_in_channels, num_labels=num_labels, id2label=id2label, label2id=label2id, )

    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight") )
rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias") )
rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight") )
rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.{j}.gamma''', f'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((f'''backbone.downsample_layers.{i}.0.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.0.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
"upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
"upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
"upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
"upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
"upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
if model_name == "upernet-convnext-tiny":
__UpperCAmelCase : Any = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
elif model_name == "upernet-convnext-small":
__UpperCAmelCase : Optional[Any] = torch.tensor(
[[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
elif model_name == "upernet-convnext-base":
__UpperCAmelCase : Dict = torch.tensor(
[[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
elif model_name == "upernet-convnext-large":
__UpperCAmelCase : Tuple = torch.tensor(
[[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
elif model_name == "upernet-convnext-xlarge":
__UpperCAmelCase : Union[str, Any] = torch.tensor(
[[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
print("Logits:", outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3], snake_case__, atol=1e-4 )
print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''')
        model.save_pretrained(pytorch_dump_folder_path)
        print(f'''Saving processor to {pytorch_dump_folder_path}''')
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f'''Pushing model and processor for {model_name} to hub''')
        model.push_to_hub(f'''openmmlab/{model_name}''')
        processor.push_to_hub(f'''openmmlab/{model_name}''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-convnext-tiny''',
type=str,
choices=[F'upernet-convnext-{size}' for size in ['''tiny''', '''small''', '''base''', '''large''', '''xlarge''']],
help='''Name of the ConvNext UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
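# Illustrative run (the script filename below is an assumption):
#   python convert_upernet_convnext_to_pytorch.py --model_name upernet-convnext-tiny \
#       --pytorch_dump_folder_path ./upernet-convnext-tiny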
| 342
| 1
|
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector))


def gaussian_error_linear_unit(vector: np.ndarray) -> np.ndarray:
    # sigmoid-based approximation of GELU: x * sigmoid(1.702 * x)
    return vector * sigmoid(1.702 * vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
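# Spot checks (exact by construction): sigmoid(np.array([0.0])) returns
# array([0.5]), and gaussian_error_linear_unit(np.array([0.0])) returns
# array([0.]) since the input multiplies the sigmoid term.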
| 189
|
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs
    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)
    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 512 / 1500,
        points_per_crop: Optional[int] = 32,
        crop_n_points_downscale_factor: Optional[int] = 1,
    ):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor)
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")

        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings

        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points

        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None")

        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }
    def _forward(self, model_inputs, pred_iou_thresh=0.88, stability_score_thresh=0.95, mask_threshold=0, stability_score_offset=1, ):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()

        model_outputs = self.model(**model_inputs)

        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False)
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0], iou_scores[0], original_sizes[0], input_boxes[0], pred_iou_thresh, stability_score_thresh, mask_threshold, stability_score_offset, )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }
    def postprocess(self, model_outputs, output_rle_mask=False, output_bboxes_mask=False, crops_nms_thresh=0.7, ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))

        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh)

        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)

        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 187
| 0
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class WhisperFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features"]

    def __init__(self, feature_size=80, sampling_rate=16000, hop_length=160, chunk_length=30, n_fft=400, padding_value=0.0, return_attention_mask=False, **kwargs, ):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, return_attention_mask=return_attention_mask, **kwargs, )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0, max_frequency=8_0_0_0.0, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney", )
    def _np_extract_fbank_features(self, waveform) -> np.ndarray:
        log_spec = spectrogram(
            waveform, window_function(self.n_fft, "hann"), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters, log_mel="log10", )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values, attention_mask, padding_value: float = 0.0) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1E-7) for x in input_values]
        return normed_input_values
    def __call__(self, raw_speech, truncation=True, pad_to_multiple_of=None, return_tensors=None, return_attention_mask=None, padding="max_length", max_length=None, sampling_rate=None, do_normalize=None, **kwargs, ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}.")
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug.")

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech, padding=padding, max_length=max_length if max_length else self.n_samples, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask or do_normalize, )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"], attention_mask=padded_inputs["attention_mask"], padding_value=self.padding_value, )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
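# Usage sketch (the checkpoint id is an assumption; audio must already be a
# 16 kHz float array):
#   fe = WhisperFeatureExtractor.from_pretrained("openai/whisper-tiny")
#   inputs = fe(waveform, sampling_rate=16000, return_tensors="pt")
#   inputs.input_features.shape  # (batch, 80 mel bins, 3000 frames)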
| 22
|
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    left = 0
    right = len(array) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])
    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(F"{func}(grid=grid)", setup=setup, number=500)
        print(F"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
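# Why the binary-search variant wins (explanatory note, not measured here):
# because rows and columns are both sorted in decreasing order, each row's
# first-negative index can only move left relative to the row above, so the
# slice grid[i][:bound] shrinks monotonically as the loop walks down the grid.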
| 22
| 1
|
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """Builder Config for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = 'audio'
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column='audio', label_column='label')
AUDIO_EXTENSIONS = [
'.aiff',
'.au',
'.avr',
'.caf',
'.flac',
'.htk',
'.svx',
'.mat4',
'.mat5',
'.mpc2k',
'.ogg',
'.paf',
'.pvf',
'.raw',
'.rf64',
'.sd2',
'.sds',
'.ircam',
'.voc',
'.w64',
'.wav',
'.nist',
'.wavex',
'.wve',
'.xi',
'.mp3',
'.opus',
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
| 76
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : Optional[int] = VOCAB_FILES_NAMES
_snake_case : int = PRETRAINED_VOCAB_FILES_MAP
_snake_case : Dict = PRETRAINED_INIT_CONFIGURATION
_snake_case : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case : Any = ConvBertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
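# Minimal usage sketch (downloads the checkpoint listed in the maps above
# from the Hugging Face Hub):
#
#     tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#     encoded = tokenizer("Hello world", return_tensors="pt")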
| 29
| 0
|
from math import pow, sqrt
def validate(*values: float) -> bool:
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


def effusion_ratio(molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass values must greater than 0.")
    )


def first_effusion_rate(
    effusion_rate: float, molar_mass_1: float, molar_mass_2: float
) -> float | ValueError:
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must greater than 0."
        )
    )


def second_effusion_rate(
    effusion_rate: float, molar_mass_1: float, molar_mass_2: float
) -> float | ValueError:
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must greater than 0."
        )
    )


def first_molar_mass(
    molar_mass: float, effusion_rate_1: float, effusion_rate_2: float
) -> float | ValueError:
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must greater than 0."
        )
    )


def second_molar_mass(
    molar_mass: float, effusion_rate_1: float, effusion_rate_2: float
) -> float | ValueError:
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must greater than 0."
        )
    )
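# Worked example for Graham's law (a sketch; molar masses are approximate):
# hydrogen (~2.016 g/mol) effuses faster than oxygen (~31.999 g/mol), and the
# ratio of their effusion rates is sqrt(31.999 / 2.016):
#
#     effusion_ratio(2.016, 31.999)  # -> roughly 3.98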
| 371
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class DPRConfig(PretrainedConfig):
    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
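# Minimal usage sketch: build a small config for a DPR encoder. The encoder
# model classes themselves are not imported in this module, so this only
# exercises the config object.
#
#     config = DPRConfig(projection_dim=128)
#     print(config.hidden_size)  # 768 by default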
| 224
| 0
|
"""simple docstring"""
import argparse
import os
import re
import packaging.version
__lowercase = """examples/"""
__lowercase = {
"""examples""": (re.compile(R"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(R"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(R"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), R"""\1version=\"VERSION\","""),
"""doc""": (re.compile(R"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
__lowercase = {
"""init""": """src/diffusers/__init__.py""",
"""setup""": """setup.py""",
}
__lowercase = """README.md"""
def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    # If the introduction or the conclusion of the list change, the prompts may need to be updated.
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/diffusers/main/model_doc",
                "https://huggingface.co/docs/diffusers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version() -> packaging.version.Version:
    """Reads the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)


def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
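# Example of what the "init" pattern above does (a sketch; the file contents
# are hypothetical): given a file containing the line
#     __version__ = "0.15.0.dev0"
# calling update_version_in_file(fname, "0.15.0", pattern="init") rewrites it
# in place to
#     __version__ = "0.15.0"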
| 40
|
"""simple docstring"""
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)

FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
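# Minimal usage sketch (requires the flax extras to be installed; the
# checkpoint name is only an example of a model with a Flax implementation):
#
#     from transformers import FlaxAutoModel
#     model = FlaxAutoModel.from_pretrained("bert-base-cased")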
| 301
| 0
|
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(len(vocab_keys), 1008)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1008)

    def test_full_tokenizer(self):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
    @cached_property
    def big_tokenizer(self):
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 31227, 4447, 35]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
        # fmt: on

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenizer_integration(self):
        expected_encoding = {
'input_ids': [[2, 10_8825, 1163, 15, 8_8010, 473, 1_5898, 157, 1_3672, 1857, 312, 8, 23_8021, 1163, 53, 1_3672, 1857, 312, 8, 5_3283, 18_2396, 8, 1_8566, 16, 3_6733, 4101, 8, 230, 24_4017, 12_2553, 7, 15, 13_2597, 4, 293, 1_2511, 7610, 4, 3414, 13_2597, 9, 4, 3_2361, 362, 4, 734, 2_8512, 3_2569, 18, 4, 3_2361, 2_6096, 1_4982, 73, 1_8715, 2_1433, 23_5261, 15, 492, 1_2427, 16, 53, 1_8715, 2_1433, 6_5454, 15, 2_3659, 563, 16, 278, 597, 2843, 595, 7931, 18_2396, 6_4186, 22, 886, 595, 13_2981, 53, 2_5540, 3449, 4_3982, 3_9901, 5951, 878, 330, 4, 2_7694, 8_0269, 312, 53, 6517, 1_1780, 611, 2_0408, 5], [2, 6, 13_2597, 67, 4_2897, 33, 592, 8, 16_3729, 2_5540, 361, 13_6997, 10_9514, 17_3230, 7, 501, 60, 10_2913, 196, 5631, 235, 6_3243, 473, 6, 23_1757, 74, 5277, 7905, 53, 3095, 3_7317, 22, 454, 18_3874, 5], [2, 268, 3_1298, 4_6530, 6, 13_2935, 4_3831, 7, 597, 32, 24, 3688, 9865, 5]],
'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/xglm-564M",
            padding=False,
        )
| 368
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""",
"""google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""",
"""google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""",
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
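# Minimal usage sketch of the two classes above:
#
#     config = MobileNetV2Config(depth_multiplier=1.0)
#     onnx_config = MobileNetV2OnnxConfig(config)
#     print(onnx_config.inputs)  # OrderedDict([('pixel_values', {0: 'batch'})])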
| 25
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}
class BitConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor

        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
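# Minimal usage sketch: configure which stages a BiT backbone should expose.
#
#     config = BitConfig(out_features=["stage1", "stage4"])
#     print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']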
| 63
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue_model_parallelism.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")
    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed model parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }

        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )
    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 343
| 0
|
"""simple docstring"""
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """finds the index of char in pattern, scanning from the right"""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """finds the index of the mismatched character in text, comparing with pattern from the last character"""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
UpperCAmelCase : List[Any] = "ABAABA"
UpperCAmelCase : str = "AB"
UpperCAmelCase : Dict = BoyerMooreSearch(text, pattern)
UpperCAmelCase : List[Any] = bms.bad_character_heuristic()
if len(positions) == 0:
print("No match found")
else:
print("Pattern found in following positions: ")
print(positions)
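# For the inputs above (text="ABAABA", pattern="AB") the pattern occurs at
# indices 0 and 3, so the expected output is:
#     Pattern found in following positions:
#     [0, 3]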
| 313
|
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
| 313
| 1
|
'''simple docstring'''
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Finds where function becomes 0 in [a, b] using the bisection (Bolzano) method."""
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1000))
import doctest
doctest.testmod()
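# The call above searches [1, 1000] for a root of f(x) = x**3 - 2*x - 5;
# the printed value should be close to 2.0945515, the real root of f.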
| 22
|
'''simple docstring'''
def get_set_bits_count(number: int) -> int:
    """Count the number of set bits in a non-negative integer (Brian Kernighan's method)."""
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
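# Quick examples: 25 is 0b11001, so it has 3 set bits.
#
#     get_set_bits_count(25)  # -> 3
#     get_set_bits_count(0)   # -> 0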
| 22
| 1
|
class A:
'''simple docstring'''
    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                # the node covers exactly one position, so it can be overwritten
                self.tree[index] = value
            else:
                # the node covers a range; keep the running maximum
                # (assumes non-decreasing updates)
                self.tree[index] = max(self.tree[index], value)
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        right -= 1  # Because of right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
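# Rough usage sketch (assumes 0-based indices, an exclusive right bound in
# query(), and the non-decreasing updates noted in update()):
#
#     tree = A(5)
#     tree.update(1, 10)
#     tree.update(3, 7)
#     print(tree.query(0, 5))  # -> 10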
| 365
|
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
| 208
| 0
|
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    # Only support V1
    r = requests.get(
        " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")

    args = parser.parse_args()

    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
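# Example invocation (a sketch; the script filename and paths are placeholders):
#
#     python convert_vae_pt_to_diffusers.py \
#         --vae_pt_path /path/to/vae.ckpt \
#         --dump_path /path/to/output_dir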
| 253
|
"""simple docstring"""
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    def test_run_glue(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            ./examples/pytorch/text-classification/run_glue.py
            --num_cores=8
            ./examples/pytorch/text-classification/run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --debug tpu_metrics_debug
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()

            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)

    def test_trainer_tpu(self):
        import xla_spawn

        testargs = """
            ./tests/test_trainer_tpu.py
            --num_cores=8
            ./tests/test_trainer_tpu.py
            """.split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
| 224
| 0
|
"""simple docstring"""
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """Computes the most likely sequence of hidden states (the Viterbi path)."""
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]

    result.reverse()
    return result
def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(
        initial_probabilities, transition_probabilities, emission_probabilities
    )


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        raise ValueError(f"{var_name} must be a list")
    else:
        for x in _object:
            if not isinstance(x, str):
                raise ValueError(f"{var_name} must be a list of strings")


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        raise ValueError(f"{var_name} must be a dict")
    if not all(isinstance(x, str) for x in _object):
        raise ValueError(f"{var_name} all keys must be strings")
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        raise ValueError(f"{var_name} {nested_text}all values must be {value_type.__name__}")
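# A minimal sketch added for illustration (the example numbers are assumptions,
# not part of the original module): the classic healthy/fever HMM, for which
# the most likely hidden sequence is ["healthy", "healthy", "fever"].
def _viterbi_example() -> list:
    observations = ["normal", "cold", "dizzy"]
    states = ["healthy", "fever"]
    initial = {"healthy": 0.6, "fever": 0.4}
    transition = {
        "healthy": {"healthy": 0.7, "fever": 0.3},
        "fever": {"healthy": 0.4, "fever": 0.6},
    }
    emission = {
        "healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
        "fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
    }
    return viterbi(observations, states, initial, transition, emission)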
if __name__ == "__main__":
from doctest import testmod
testmod()
| 355
|
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': AlbertModel,
'fill-mask': AlbertForMaskedLM,
'question-answering': AlbertForQuestionAnswering,
'text-classification': AlbertForSequenceClassification,
'token-classification': AlbertForTokenClassification,
'zero-shot': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fp16 = True
    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict['labels'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict['sentence_order_label'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained('albert-base-v2')
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 100
| 0
|
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler(metaclass=DummyObject):
    _backends = ['torch', 'torchsde']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch', 'torchsde'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'torchsde'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'torchsde'])
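# Design note: this dummy object stands in for the real scheduler when its
# optional backends are missing; constructing or loading it raises a clear
# error asking the user to install torch and torchsde.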
| 5
|
"""simple docstring"""
import math
import unittest
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
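# Why the stride of 6 above suffices: every integer is 6k + r with
# r in {0, 1, 2, 3, 4, 5}; r in {0, 2, 4} gives an even number and r == 3 a
# multiple of 3, so only 6k - 1 and 6k + 1 can be prime. Testing i and i + 2
# on each iteration covers exactly those two candidates.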
class Test(unittest.TestCase):
    def test_primes(self):
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
self.assertFalse(
is_prime(0 ) , """Zero doesn't have any positive factors, primes must have exactly two.""" , )
self.assertFalse(
is_prime(1 ) , """One only has 1 positive factor, primes must have exactly two.""" , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 25
| 0
|
"""simple docstring"""
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path: str, config_path: str, output_path: str):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--checkpoint_path', type=str, required=True)
    parser.add_argument('--config_path', type=str, required=True)
    parser.add_argument('--output_path', type=str, required=True)
    args = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
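# Example invocation (hypothetical script name and paths, for illustration only):
#   python convert_ldm_original.py --checkpoint_path model.ckpt \
#       --config_path config.yaml --output_path ./ldm_pipeline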
| 132
|
"""simple docstring"""
def is_balanced(s: str) -> bool:
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0
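# Illustrative behavior of is_balanced (assumed examples, matching the logic above):
#   is_balanced("([]{})")  -> True
#   is_balanced("([)]")    -> False  (closer does not match the top of the stack)
#   is_balanced("((")      -> False  (unmatched openers remain on the stack)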
def main():
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
| 132
| 1
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            'PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument('--num_cores', type=int, default=1, help='Number of TPU cores to use (1 or 8).')

    # positional
    parser.add_argument(
        'training_script',
        type=str,
        help=(
            'The full path to the single TPU training '
            'program/script to be launched in parallel, '
            'followed by all the arguments for the '
            'training script'
        ),
    )

    # rest from the training program
    parser.add_argument('training_script_args', nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
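# Example invocation (hypothetical script path, shown for illustration only):
#   python xla_spawn.py --num_cores 8 ./my_training_script.py --learning_rate 1e-4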
if __name__ == "__main__":
main()
| 313
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json',
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    model_type = 'deformable_detr'
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }
    def __init__(
        self,
        use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=300,
        max_position_embeddings=1024, encoder_layers=6, encoder_ffn_dim=1024, encoder_attention_heads=8,
        decoder_layers=6, decoder_ffn_dim=1024, decoder_attention_heads=8, encoder_layerdrop=0.0,
        is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1,
        attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0,
        return_intermediate=True, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50",
        use_pretrained_backbone=True, dilation=False, num_feature_levels=4, encoder_n_points=4,
        decoder_n_points=4, two_stage=False, two_stage_num_proposals=300, with_box_refine=False,
        class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1,
        dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1,
        focal_alpha=0.25, disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
                backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get('model_type')
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError('If two_stage is True, with_box_refine must be True.')
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
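# A minimal usage sketch (assumed, mirroring other HF configs; not from the
# original file):
#   config = DeformableDetrConfig(two_stage=True, with_box_refine=True)
#   assert config.hidden_size == config.d_model  # resolved via attribute_map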
| 313
| 1
|
'''simple docstring'''
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Flip both qubits with X gates, measure them, and return the counts."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
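# On the noiseless simulator, X|0> = |1> for both qubits, so every shot yields
# the bitstring '11' and the returned counts look like {'11': 1000}
# (illustrative expected output, not part of the original script).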
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(f'Total count for various states are: {counts}')
| 164
|
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer(nn.Module):
    """Wraps a linear module with a low-rank adapter - used for testing purposes only."""

    def __init__(self, module: nn.Module, rank: int):
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False),
            nn.Linear(rank, module.out_features, bias=False),
        )
        small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight, std=small_std)
        nn.init.zeros_(self.adapter[1].weight)
        self.adapter.to(module.weight.device)

    def forward(self, input, *args, **kwargs):
        return self.module(input, *args, **kwargs) + self.adapter(input)
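# Design note: the adapter above is a rank-bottlenecked pair of linear maps
# added onto the frozen module's output; zero-initialising the second map
# makes the wrapped layer start out exactly equal to the original one.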
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    model_name = 'bigscience/bloom-1b7'

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = 'Hello my name is'
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add('Hello my name is John and I am a professional photographer. I')
    EXPECTED_OUTPUTS.add('Hello my name is John.\nI am a friend of your father.\n')
    EXPECTED_OUTPUTS.add('Hello my name is John Doe, I am a student at the University')
    MAX_NEW_TOKENS = 10

    def setUp(self):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class Bnb4BitTest(Base4bitTest):
def UpperCAmelCase_ ( self ) -> Optional[Any]:
super().setUp()
# Models and tokenizer
A_ : List[str] = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map="""auto""" )
A_ : List[Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_lowerCamelCase , device_map="""auto""" )
def UpperCAmelCase_ ( self ) -> Optional[int]:
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self ) -> List[Any]:
A_ : str = self.model_abit.config
self.assertTrue(hasattr(_lowerCamelCase , """quantization_config""" ) )
A_ : Union[str, Any] = config.to_dict()
A_ : Optional[int] = config.to_diff_dict()
A_ : Tuple = config.to_json_string()
def UpperCAmelCase_ ( self ) -> str:
from bitsandbytes.nn import Paramsabit
A_ : List[Any] = self.model_fpaa.get_memory_footprint()
A_ : Tuple = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
A_ : Union[str, Any] = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def UpperCAmelCase_ ( self ) -> List[str]:
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(_lowerCamelCase , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : List[str] = self.tokenizer(self.input_text , return_tensors="""pt""" )
A_ : int = self.model_abit.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_lowerCamelCase ) , self.EXPECTED_OUTPUTS )
def UpperCAmelCase_ ( self ) -> Any:
A_ : Dict = BitsAndBytesConfig()
A_ : Tuple = True
A_ : Any = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=_lowerCamelCase , device_map="""auto""" )
A_ : Optional[int] = self.tokenizer(self.input_text , return_tensors="""pt""" )
A_ : Optional[Any] = model_abit_from_config.generate(
input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_lowerCamelCase ) , self.EXPECTED_OUTPUTS )
def UpperCAmelCase_ ( self ) -> List[Any]:
with self.assertRaises(_lowerCamelCase ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> List[str]:
A_ : Union[str, Any] = BitsAndBytesConfig()
with self.assertRaises(_lowerCamelCase ):
A_ : List[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=_lowerCamelCase , load_in_abit=_lowerCamelCase , device_map="""auto""" , bnb_abit_quant_type="""nf4""" , )
def UpperCAmelCase_ ( self ) -> str:
with self.assertRaises(_lowerCamelCase ):
# Tries with `str`
self.model_abit.to("""cpu""" )
with self.assertRaises(_lowerCamelCase ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(_lowerCamelCase ):
# Tries with a `device`
self.model_abit.to(torch.device("""cuda:0""" ) )
with self.assertRaises(_lowerCamelCase ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(_lowerCamelCase ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
A_ : Optional[int] = self.tokenizer(self.input_text , return_tensors="""pt""" )
A_ : Tuple = self.model_fpaa.to(torch.floataa )
A_ : int = self.model_fpaa.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
A_ : Any = self.model_fpaa.to("""cpu""" )
# Check this does not throw an error
A_ : str = self.model_fpaa.half()
# Check this does not throw an error
A_ : Any = self.model_fpaa.float()
def UpperCAmelCase_ ( self ) -> Dict:
A_ : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained("""t5-small""" , load_in_abit=_lowerCamelCase , device_map="""auto""" )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
@classmethod
def UpperCAmelCase_ ( cls ) -> Optional[int]:
A_ : Optional[int] = """t5-small"""
A_ : List[str] = """google/flan-t5-small""" # flan-t5 uses dense-act instead of dense-relu-dense
A_ : List[str] = AutoTokenizer.from_pretrained(cls.model_name )
A_ : Optional[Any] = """Translate in German: Hello, my dog is cute"""
def UpperCAmelCase_ ( self ) -> Optional[Any]:
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
from transformers import TaForConditionalGeneration
A_ : Optional[int] = TaForConditionalGeneration._keep_in_fpaa_modules
A_ : Any = None
# test with `t5-small`
A_ : int = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_lowerCamelCase , device_map="""auto""" )
A_ : Optional[Any] = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A_ : Optional[int] = model.generate(**_lowerCamelCase )
# test with `flan-t5-small`
A_ : Tuple = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=_lowerCamelCase , device_map="""auto""" )
A_ : Dict = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A_ : str = model.generate(**_lowerCamelCase )
A_ : Optional[int] = modules
def UpperCAmelCase_ ( self ) -> List[Any]:
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
A_ : str = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_lowerCamelCase , device_map="""auto""" )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
A_ : Any = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A_ : List[Any] = model.generate(**_lowerCamelCase )
# test with `flan-t5-small`
A_ : Union[str, Any] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=_lowerCamelCase , device_map="""auto""" )
A_ : int = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A_ : Optional[int] = model.generate(**_lowerCamelCase )
class Classes4BitModelTest(Base4bitTest):
def UpperCAmelCase_ ( self ) -> int:
super().setUp()
# model_name
A_ : Dict = """bigscience/bloom-560m"""
A_ : Union[str, Any] = """t5-small"""
# Different types of model
A_ : Optional[Any] = AutoModel.from_pretrained(self.model_name , load_in_abit=_lowerCamelCase , device_map="""auto""" )
# Sequence classification model
A_ : Dict = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=_lowerCamelCase , device_map="""auto""" )
# CausalLM model
A_ : Any = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_lowerCamelCase , device_map="""auto""" )
# Seq2seq model
A_ : Tuple = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=_lowerCamelCase , device_map="""auto""" )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self ) -> List[Any]:
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class Pipeline4BitTest(Base4bitTest):
def UpperCAmelCase_ ( self ) -> str:
super().setUp()
def UpperCAmelCase_ ( self ) -> Any:
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : List[str] = pipeline(
"""text-generation""" , model=self.model_name , model_kwargs={"""device_map""": """auto""", """load_in_4bit""": True, """torch_dtype""": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
A_ : int = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]["""generated_text"""] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class Bnb4BitTestMultiGpu(Base4bitTest):
def UpperCAmelCase_ ( self ) -> str:
super().setUp()
def UpperCAmelCase_ ( self ) -> str:
A_ : List[str] = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=_lowerCamelCase , device_map="""balanced""" )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
A_ : str = self.tokenizer(self.input_text , return_tensors="""pt""" )
# Second real batch
A_ : int = model_parallel.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=_lowerCamelCase ) , self.EXPECTED_OUTPUTS )
class Bnb4BitTestTraining(Base4bitTest):
def UpperCAmelCase_ ( self ) -> Tuple:
A_ : Union[str, Any] = """facebook/opt-350m"""
super().setUp()
def UpperCAmelCase_ ( self ) -> Optional[Any]:
if version.parse(importlib.metadata.version("""bitsandbytes""" ) ) < version.parse("""0.37.0""" ):
return
# Step 1: freeze all parameters
A_ : Any = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_lowerCamelCase )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
A_ : Optional[Any] = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
A_ : Any = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(_lowerCamelCase ) ):
A_ : int = LoRALayer(module.q_proj , rank=16 )
A_ : Optional[int] = LoRALayer(module.k_proj , rank=16 )
A_ : Union[str, Any] = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
A_ : Dict = self.tokenizer("""Test batch """ , return_tensors="""pt""" ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
A_ : Dict = model.forward(**_lowerCamelCase )
out.logits.norm().backward()
for module in model.modules():
if isinstance(_lowerCamelCase , _lowerCamelCase ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(_lowerCamelCase , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = 'gpt2-xl'
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
| 164
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_clap': [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapAudioConfig',
'ClapConfig',
'ClapTextConfig',
],
'processing_clap': ['ClapProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_clap'] = [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapModel',
'ClapPreTrainedModel',
'ClapTextModel',
'ClapTextModelWithProjection',
'ClapAudioModel',
'ClapAudioModelWithProjection',
]
    _import_structure['feature_extraction_clap'] = ['ClapFeatureExtractor']
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
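# Design note: the lazy-module pattern above defers the heavy, torch-dependent
# imports until an attribute is first accessed, which keeps `import transformers`
# fast even when all optional backends are installed.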
| 98
|
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
logger = logging.get_logger(__name__)
def load_pytorch_checkpoint_in_flax_state_dict(flax_model, pytorch_checkpoint_path, is_sharded, allow_missing_keys=False):
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            'Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'
            ' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
            ' instructions.')
        raise

    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path)
        logger.info(F'Loading PyTorch weights from {pt_path}')

        pt_state_dict = torch.load(pt_path, map_location='cpu')
        logger.info(F'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters.')

        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path, flax_model)

    return flax_state_dict
def rename_key_and_reshape_tensor(
    pt_tuple_key: Tuple[str],
    pt_tensor: np.ndarray,
    random_flax_state_dict: Dict[str, jnp.ndarray],
    model_prefix: str,
) -> (Tuple[str], np.ndarray):
    """Rename PyTorch weight names to the corresponding Flax names, reshaping the tensor when necessary."""

    def is_key_or_prefix_key_in_dict(key: Tuple[str]) -> bool:
        """Checks whether `key` or `(model_prefix,) + key` is in random_flax_state_dict."""
        return len(set(random_flax_state_dict) & {key, (model_prefix,) + key}) > 0
    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('scale',)
    if pt_tuple_key[-1] in ['weight', 'gamma'] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('mean',)
    if pt_tuple_key[-1] == 'running_mean' and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('var',)
    if pt_tuple_key[-1] == 'running_var' and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('embedding',)
    if pt_tuple_key[-1] == 'weight' and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('kernel',)
    if pt_tuple_key[-1] == 'weight' and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('kernel',)
    if pt_tuple_key[-1] == 'weight' and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('weight',)
    if pt_tuple_key[-1] == 'gamma':
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('bias',)
    if pt_tuple_key[-1] == 'beta':
        return renamed_pt_tuple_key, pt_tensor

    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ('parametrizations', 'original0'):
        name = pt_tuple_key[-2] + '_g'
    elif pt_tuple_key[-3::2] == ('parametrizations', 'original1'):
        name = pt_tuple_key[-2] + '_v'
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
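# Illustrative renames performed above (the example keys are assumptions, not
# taken from a real checkpoint):
#   ('layer_norm', 'weight')             -> ('layer_norm', 'scale')
#   ('conv', 'weight') with a 4-D tensor -> ('conv', 'kernel'), transposed to (H, W, I, O)
#   ('dense', 'weight') with a 2-D tensor -> ('dense', 'kernel'), transposed
#   ('norm', 'gamma') / ('norm', 'beta') -> ('norm', 'weight') / ('norm', 'bias')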
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model):
    # convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    model_prefix = flax_model.base_model_prefix

    # use params dict if the model contains batch norm layers
    if 'params' in flax_model.params:
        flax_model_params = flax_model.params['params']
    else:
        flax_model_params = flax_model.params
    random_flax_state_dict = flatten_dict(flax_model_params)

    # add batch_stats keys,values to dict
    if 'batch_stats' in flax_model.params:
        flax_batch_stats = flatten_dict(flax_model.params['batch_stats'])
        random_flax_state_dict.update(flax_batch_stats)

    flax_state_dict = {}

    load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split('.')[0] for k in pt_state_dict.keys()}
    )
    load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split('.')[0] for k in pt_state_dict.keys()}
    )

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        pt_tuple_key = tuple(pt_key.split('.'))

        # remove base model prefix if necessary
        has_base_model_prefix = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            pt_tuple_key = pt_tuple_key[1:]

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(
            pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
        )

        # add model prefix if necessary
        require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key = (model_prefix,) + flax_key

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
                    F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.'
                )

        # add batch stats if the model contains batchnorm layers
        if 'batch_stats' in flax_model.params:
            if 'mean' in flax_key[-1] or 'var' in flax_key[-1]:
                flax_state_dict[('batch_stats',) + flax_key] = jnp.asarray(flax_tensor)
                continue
            # remove num_batches_tracked key
            if 'num_batches_tracked' in flax_key[-1]:
                flax_state_dict.pop(flax_key, None)
                continue

            # also add unexpected weight so that warning is thrown
            flax_state_dict[('params',) + flax_key] = jnp.asarray(flax_tensor)
        else:
            # also add unexpected weight so that warning is thrown
            flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model):
    import torch

    # Load the index
    flax_state_dict = {}
for shard_file in shard_filenames:
# load using msgpack utils
__lowerCamelCase : Dict = torch.load(_lowerCAmelCase )
__lowerCamelCase : Dict = {k: v.numpy() for k, v in pt_state_dict.items()}
__lowerCamelCase : Optional[Any] = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
__lowerCamelCase : str = flax_model.params['params']
__lowerCamelCase : Tuple = flatten_dict(_lowerCAmelCase )
random_flax_state_dict.update(flatten_dict(flax_model.params['batch_stats'] ) )
else:
__lowerCamelCase : Dict = flax_model.params
__lowerCamelCase : Optional[int] = flatten_dict(_lowerCAmelCase )
__lowerCamelCase : List[Any] = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
__lowerCamelCase : List[str] = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
__lowerCamelCase : Optional[Any] = tuple(pt_key.split('.' ) )
# remove base model prefix if necessary
__lowerCamelCase : List[Any] = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
__lowerCamelCase : Optional[int] = pt_tuple_key[1:]
# Correctly rename weight parameters
__lowerCamelCase ,__lowerCamelCase : Any = rename_key_and_reshape_tensor(
_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
# add model prefix if necessary
__lowerCamelCase : Tuple = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
__lowerCamelCase : Optional[Any] = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
__lowerCamelCase : int = jnp.asarray(_lowerCAmelCase )
continue
if "var" in flax_key[-1]:
__lowerCamelCase : Tuple = jnp.asarray(_lowerCAmelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_lowerCAmelCase ,_lowerCAmelCase )
continue
# also add unexpected weight so that warning is thrown
__lowerCamelCase : Optional[int] = jnp.asarray(_lowerCAmelCase )
else:
# also add unexpected weight so that warning is thrown
__lowerCamelCase : Tuple = jnp.asarray(_lowerCAmelCase )
return unflatten_dict(_lowerCAmelCase )
def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path)
    logger.info(F'Loading Flax weights from {flax_checkpoint_path}')

    # import correct flax class
    flax_cls = getattr(transformers, 'Flax' + model.__class__.__name__)

    # load flax weight dict
    with open(flax_checkpoint_path, 'rb') as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls, state_f.read())
        except UnpicklingError:
            raise EnvironmentError(F'Unable to convert {flax_checkpoint_path} to Flax deserializable object.')

    return load_flax_weights_in_pytorch_model(model, flax_state_dict)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load Flax checkpoint weights into a PyTorch model."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            'Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'
            ' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
            ' instructions.')
        raise
    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16, since torch.from_numpy cannot handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '
            'before loading those in PyTorch model.')
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state)
__lowerCamelCase : Any = flatten_dict(_lowerCAmelCase )
__lowerCamelCase : Union[str, Any] = pt_model.state_dict()
__lowerCamelCase : Union[str, Any] = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split('.' )[0] for k in pt_model_dict.keys()}
)
__lowerCamelCase : Tuple = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split('.' )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
__lowerCamelCase : Any = []
__lowerCamelCase : Union[str, Any] = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
__lowerCamelCase : List[str] = flax_key_tuple[0] == pt_model.base_model_prefix
__lowerCamelCase : Dict = '.'.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
__lowerCamelCase : List[Any] = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
__lowerCamelCase : Optional[Any] = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(_lowerCAmelCase ) not in pt_model_dict:
# conv layer
__lowerCamelCase : Tuple = flax_key_tuple[:-1] + ('weight',)
__lowerCamelCase : Tuple = jnp.transpose(_lowerCAmelCase ,(3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_lowerCAmelCase ) not in pt_model_dict:
# linear layer
__lowerCamelCase : Dict = flax_key_tuple[:-1] + ('weight',)
__lowerCamelCase : str = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
__lowerCamelCase : Optional[Any] = flax_key_tuple[:-1] + ('weight',)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
__lowerCamelCase : str = flax_key_tuple[:-1] + ('running_mean',)
elif "var" in flax_key_tuple[-1]:
__lowerCamelCase : int = flax_key_tuple[:-1] + ('running_var',)
if "batch_stats" in flax_state:
__lowerCamelCase : Tuple = '.'.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
__lowerCamelCase : Optional[int] = '.'.join(_lowerCAmelCase )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
__lowerCamelCase : Tuple = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
__lowerCamelCase : str = key.split('.' )
__lowerCamelCase : Tuple = None
if key_components[-3::2] == ["parametrizations", "original0"]:
__lowerCamelCase : Any = key_components[-2] + '_g'
elif key_components[-3::2] == ["parametrizations", "original1"]:
__lowerCamelCase : Tuple = key_components[-2] + '_v'
if name is not None:
__lowerCamelCase : Optional[int] = key_components[:-3] + [name]
__lowerCamelCase : Union[str, Any] = '.'.join(_lowerCAmelCase )
__lowerCamelCase : Optional[int] = key
if flax_key in special_pt_names:
__lowerCamelCase : int = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
else:
# add weight to pytorch dict
__lowerCamelCase : Tuple = np.asarray(_lowerCAmelCase ) if not isinstance(_lowerCAmelCase ,np.ndarray ) else flax_tensor
__lowerCamelCase : List[str] = torch.from_numpy(_lowerCAmelCase )
# remove from missing keys
missing_keys.remove(_lowerCAmelCase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(_lowerCAmelCase )
pt_model.load_state_dict(_lowerCAmelCase )
# re-transform missing_keys to list
__lowerCamelCase : int = list(_lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
logger.warning(
'Some weights of the Flax model were not used when initializing the PyTorch model'
F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'
F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'
' FlaxBertForSequenceClassification model).' )
else:
logger.warning(F'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' )
if len(_lowerCAmelCase ) > 0:
logger.warning(
F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
' use it for predictions and inference.' )
else:
logger.warning(
F'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'
'If your task is similar to the task the model of the checkpoint was trained on, '
F'you can already use {pt_model.__class__.__name__} for predictions without further training.' )
return pt_model
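# A minimal usage sketch for the loader above (hedged: the checkpoint name is
# only an example; any architecture with both PyTorch and Flax implementations
# should work the same way):
# from transformers import BertModel, FlaxBertModel
# flax_model = FlaxBertModel.from_pretrained('bert-base-cased')
# pt_model = BertModel.from_pretrained('bert-base-cased')
# pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)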
| 208
| 0
|
import os
def solution(data_file_name: str = "input.txt") -> int:
    with open(os.path.join(os.path.dirname(__file__), data_file_name)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")]
            for line in input_file.readlines()
        ]
    rows = len(matrix)
    cols = len(matrix[0])
    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]
    for j in range(1, cols):
        # moving right: extend the best sums from the previous column
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        # moving down within the current column
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )
        # moving up within the current column
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )
    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
if __name__ == "__main__":
print(F'{solution() = }')
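# For reference, a quick sanity check of the dynamic programme above. The matrix and
# the expected value 994 come from the Project Euler 82 statement; minimal_path_sum
# is my own file-free restatement of the same recurrence, not part of the original script.
def minimal_path_sum(matrix: list) -> int:
    rows, cols = len(matrix), len(matrix[0])
    sums = [row[0] for row in matrix]  # best path sums for the current column
    for j in range(1, cols):
        sums = [sums[i] + matrix[i][j] for i in range(rows)]  # step right
        for i in range(1, rows):  # sweep down
            sums[i] = min(sums[i], sums[i - 1] + matrix[i][j])
        for i in range(rows - 2, -1, -1):  # sweep up
            sums[i] = min(sums[i], sums[i + 1] + matrix[i][j])
    return min(sums)

example_matrix = [
    [131, 673, 234, 103, 18],
    [201, 96, 342, 965, 150],
    [630, 803, 746, 422, 111],
    [537, 699, 497, 121, 956],
    [805, 732, 524, 37, 331],
]
assert minimal_path_sum(example_matrix) == 994  # path 201 -> 96 -> 342 -> 234 -> 103 -> 18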
| 367
|
from __future__ import annotations
def all_unique(elements: list) -> bool:
    return len(set(elements)) == len(elements)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 224
| 0
|
'''simple docstring'''
from math import factorial
def solution(n: int = 20) -> int:
    """Returns the number of lattice paths through an n x n grid."""
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
        n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number.')
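# As a quick check of the closed form (my own arithmetic, not part of the original
# script): a 2x2 grid has C(4, 2) = 6 routes, and the 20x20 grid of the problem
# statement has C(40, 20) = 137846528820.
assert solution(2) == 6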
| 3
|
"""simple docstring"""
def solution(max_perimeter: int = 10**9) -> int:
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
print(F"""{solution() = }""")
| 100
| 0
|
"""simple docstring"""
def perfect(number: int) -> bool:
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
if __name__ == "__main__":
print('''Program to check whether a number is a Perfect number or not...''')
    number = int(input('Enter number: ').strip())
print(f'''{number} is {'' if perfect(number) else 'not '}a Perfect Number.''')
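# A quick sweep with the predicate above (my own example): the perfect numbers
# below 10000 are exactly 6, 28, 496 and 8128.
# print([n for n in range(2, 10_000) if perfect(n)])  # -> [6, 28, 496, 8128]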
| 38
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mvp"] = [
        "MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MvpForCausalLM",
        "MvpForConditionalGeneration",
        "MvpForQuestionAnswering",
        "MvpForSequenceClassification",
        "MvpModel",
        "MvpPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
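# For context, `_LazyModule` defers the heavy submodule imports until an attribute
# is first accessed. A minimal sketch of the idea (my own simplification, not the
# actual `_LazyModule` implementation):
import importlib
import types

class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported name to the submodule that defines it
        self._name_to_module = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        # imported lazily, only on first access; the real class also caches the result
        submodule = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        return getattr(submodule, attr)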
| 38
| 1
|
"""simple docstring"""
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
logger = logging.get_logger(__name__)
def load_pytorch_checkpoint_in_flax_state_dict(
    flax_model, pytorch_checkpoint_path, is_sharded, allow_missing_keys=False
):
    """Load pytorch checkpoints in a flax model"""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a PyTorch model in Flax requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path)
        logger.info(f"Loading PyTorch weights from {pt_path}")

        pt_state_dict = torch.load(pt_path, map_location="cpu")
        logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters.")

        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path, flax_model)
    return flax_state_dict
def rename_key_and_reshape_tensor(
    pt_tuple_key,
    pt_tensor,
    random_flax_state_dict,
    model_prefix,
) -> (Tuple[str], np.ndarray):
    """Rename PyTorch weight names to Flax weight names and reshape the tensor if necessary"""

    def is_key_or_prefix_key_in_dict(key) -> bool:
        """checks whether `key` or `(model_prefix,) + key` is in random_flax_state_dict"""
        return len(set(random_flax_state_dict) & {key, (model_prefix,) + key}) > 0

    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("mean",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("var",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + "_g"
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + "_v"
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
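# A toy illustration of the renaming rules above (my own example): a 2D PyTorch
# "weight" that is absent from the Flax state dict under its original name falls
# through to the linear-layer branch and comes back as a transposed "kernel".
# example_key = ("encoder", "dense", "weight")
# example_tensor = np.ones((4, 8), dtype=np.float32)
# new_key, new_tensor = rename_key_and_reshape_tensor(
#     example_key, example_tensor, {("encoder", "dense", "kernel"): np.ones((8, 4))}, "model"
# )
# new_key == ("encoder", "dense", "kernel") and new_tensor.shape == (8, 4)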
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model):
    # convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    model_prefix = flax_model.base_model_prefix

    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        flax_model_params = flax_model.params["params"]
    else:
        flax_model_params = flax_model.params
    random_flax_state_dict = flatten_dict(flax_model_params)

    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        flax_batch_stats = flatten_dict(flax_model.params["batch_stats"])
        random_flax_state_dict.update(flax_batch_stats)

    flax_state_dict = {}

    load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
    )
    load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
    )

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        pt_tuple_key = tuple(pt_key.split("."))

        # remove base model prefix if necessary
        has_base_model_prefix = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            pt_tuple_key = pt_tuple_key[1:]

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(
            pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
        )

        # add model prefix if necessary
        require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key = (model_prefix,) + flax_key

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(flax_key, None)
                continue

            # also add unexpected weight so that warning is thrown
            flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
        else:
            # also add unexpected weight so that warning is thrown
            flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model):
    import torch

    # Load the index
    flax_state_dict = {}
    for shard_file in shard_filenames:
        # load using msgpack utils
        pt_state_dict = torch.load(shard_file)
        pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

        model_prefix = flax_model.base_model_prefix

        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if "batch_stats" in flax_model.params:
            flax_model_params = flax_model.params["params"]

            random_flax_state_dict = flatten_dict(flax_model_params)
            random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"]))
        else:
            flax_model_params = flax_model.params
            random_flax_state_dict = flatten_dict(flax_model_params)

        load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
        )

        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            pt_tuple_key = tuple(pt_key.split("."))

            # remove base model prefix if necessary
            has_base_model_prefix = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                pt_tuple_key = pt_tuple_key[1:]

            # Correctly rename weight parameters
            flax_key, flax_tensor = rename_key_and_reshape_tensor(
                pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
            )
            # add model prefix if necessary
            require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                flax_key = (model_prefix,) + flax_key

            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                        f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                    )

            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                if "var" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(flax_key, None)
                    continue

                # also add unexpected weight so that warning is thrown
                flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
            else:
                # also add unexpected weight so that warning is thrown
                flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path)
    logger.info(f"Loading Flax weights from {flax_checkpoint_path}")

    # import correct flax class
    flax_cls = getattr(transformers, "Flax" + model.__class__.__name__)

    # load flax weight dict
    with open(flax_checkpoint_path, "rb") as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls, state_f.read())
        except UnpicklingError:
            raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(model, flax_state_dict)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load flax checkpoints in a PyTorch model"""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy cannot handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    flax_state_dict = flatten_dict(flax_state)
    pt_model_dict = pt_model.state_dict()

    load_model_with_head_into_base_model = (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split(".")[0] for k in pt_model_dict.keys()}
    )
    load_base_model_into_model_with_head = (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split(".")[0] for k in pt_model_dict.keys()}
    )

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix
        require_base_model_prefix = ".".join((pt_model.base_model_prefix,) + flax_key_tuple) in pt_model_dict

        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            flax_key_tuple = flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple

        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(flax_key_tuple) not in pt_model_dict:
            # conv layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple) not in pt_model_dict:
            # linear layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_mean",)
        elif "var" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_var",)

        if "batch_stats" in flax_state:
            flax_key = ".".join(flax_key_tuple[1:])  # Remove the params/batch_stats header
        else:
            flax_key = ".".join(flax_key_tuple)

        # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
        special_pt_names = {}
        # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
        for key in pt_model_dict:
            key_components = key.split(".")
            name = None
            if key_components[-3::2] == ["parametrizations", "original0"]:
                name = key_components[-2] + "_g"
            elif key_components[-3::2] == ["parametrizations", "original1"]:
                name = key_components[-2] + "_v"
            if name is not None:
                key_components = key_components[:-3] + [name]
                key_to_check = ".".join(key_components)
                special_pt_names[key_to_check] = key

        if flax_key in special_pt_names:
            flax_key = special_pt_names[flax_key]

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    else:
        logger.warning(f"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n")

    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )
    else:
        logger.warning(
            f"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"
            "If your task is similar to the task the model of the checkpoint was trained on, "
            f"you can already use {pt_model.__class__.__name__} for predictions without further training."
        )

    return pt_model
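# Taken together, the two directions pair up as follows (a hedged sketch; the
# checkpoint path and model objects are illustrative):
# PyTorch -> Flax: build Flax params from a PT checkpoint file
# flax_params = load_pytorch_checkpoint_in_flax_state_dict(flax_model, "pytorch_model.bin", is_sharded=False)
# Flax -> PyTorch: load Flax params into an instantiated PT module
# pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)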
| 132
|
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
| 132
| 1
|
def average_absolute_deviation(nums: list) -> float:
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 358
|
import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        # Input values provided for training the model.
        self.input_array = input_array

        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.

        # Random initial weights are assigned.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )

        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)

        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)

        # Real output values provided.
        self.output_array = output_array

        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        # Layer connecting the input layer with the first hidden set of nodes.
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )

        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )

        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr: numpy.ndarray) -> int:
        # Input values for which the prediction is to be made.
        self.array = input_arr

        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return int(self.layer_between_second_hidden_layer_and_output > 0.6)


def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    return (value) * (1 - (value))


def example() -> int:
    # Input values.
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )

    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)

    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)

    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)

    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
if __name__ == "__main__":
example()
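# One remark worth keeping (my own note, consistent with the code above):
# sigmoid_derivative expects values that have already been passed through sigmoid,
# because for s = sigmoid(x) the derivative is ds/dx = s * (1 - s). That is why it
# is always applied to layer activations rather than to raw pre-activations.
# Quick numerical check of the identity (my own):
# s = sigmoid(numpy.array(0.5)); h = 1e-6
# (sigmoid(0.5 + h) - sigmoid(0.5 - h)) / (2 * h)  is approximately  sigmoid_derivative(s)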
| 134
| 0
|
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
            "sep": self.sep,
            "header": self.header,
            "names": self.names,
            "index_col": self.index_col,
            "usecols": self.usecols,
            "prefix": self.prefix,
            "mangle_dupe_cols": self.mangle_dupe_cols,
            "engine": self.engine,
            "converters": self.converters,
            "true_values": self.true_values,
            "false_values": self.false_values,
            "skipinitialspace": self.skipinitialspace,
            "skiprows": self.skiprows,
            "nrows": self.nrows,
            "na_values": self.na_values,
            "keep_default_na": self.keep_default_na,
            "na_filter": self.na_filter,
            "verbose": self.verbose,
            "skip_blank_lines": self.skip_blank_lines,
            "thousands": self.thousands,
            "decimal": self.decimal,
            "lineterminator": self.lineterminator,
            "quotechar": self.quotechar,
            "quoting": self.quoting,
            "escapechar": self.escapechar,
            "comment": self.comment,
            "encoding": self.encoding,
            "dialect": self.dialect,
            "error_bad_lines": self.error_bad_lines,
            "warn_bad_lines": self.warn_bad_lines,
            "skipfooter": self.skipfooter,
            "doublequote": self.doublequote,
            "memory_map": self.memory_map,
            "float_precision": self.float_precision,
            "chunksize": self.chunksize,
            "encoding_errors": self.encoding_errors,
            "on_bad_lines": self.on_bad_lines,
            "date_format": self.date_format,
        }

        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
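# A hedged usage note: this builder backs the packaged "csv" loading script, so the
# CsvConfig fields above surface directly as keyword arguments of load_dataset
# (the file name below is illustrative):
# from datasets import load_dataset
# dataset = load_dataset("csv", data_files={"train": "my_data.csv"}, sep=";", quotechar="'")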
| 164
|
'''simple docstring'''
def odd_even_sort(input_list: list) -> list:
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list
if __name__ == "__main__":
    print("Enter list to be sorted")
    input_list = [int(x) for x in input().split()]
    # inputing elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
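# A quick check of the odd-even transposition sort above (my own examples):
# assert odd_even_sort([5, 3, 1, 2, 4]) == [1, 2, 3, 4, 5]
# assert odd_even_sort([]) == []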
| 164
| 1
|
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_glue_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --seed=42
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "glue_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_clm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --block_size 128
            --per_device_train_batch_size 5
            --per_device_eval_batch_size 5
            --num_train_epochs 2
            --output_dir {tmp_dir}
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        if torch.cuda.device_count() > 1:
            # Skipping because there are not enough batches to train the model + would need a drop_last to work.
            return

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 100)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "clm_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_mlm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --num_train_epochs=1
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 42)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "mlm_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_ner_no_trainer(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertLess(result["train_loss"], 0.5)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "ner_no_trainer")))

    @unittest.skip(reason="Fix me @muellerzr")
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_squad_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --seed=42
            --max_train_steps=10
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["eval_f1"], 28)
        self.assertGreaterEqual(result["eval_exact"], 28)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "qa_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_swag_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/swag/sample.json
            --validation_file tests/fixtures/tests_samples/swag/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=20
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.8)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "swag_no_trainer")))

    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_summarization_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=50
            --num_warmup_steps=8
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_rouge1"], 10)
        self.assertGreaterEqual(result["eval_rouge2"], 2)
        self.assertGreaterEqual(result["eval_rougeL"], 7)
        self.assertGreaterEqual(result["eval_rougeLsum"], 7)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "summarization_no_trainer")))

    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_translation_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
            --model_name_or_path sshleifer/student_marian_en_ro_6_1
            --source_lang en
            --target_lang ro
            --train_file tests/fixtures/tests_samples/wmt16/sample.json
            --validation_file tests/fixtures/tests_samples/wmt16/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=50
            --num_warmup_steps=8
            --num_beams=6
            --learning_rate=3e-3
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --source_lang en_XX
            --target_lang ro_RO
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_bleu"], 30)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "translation_no_trainer")))

    @slow
    def test_run_semantic_segmentation_no_trainer(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
            --dataset_name huggingface/semantic-segmentation-test-sample
            --output_dir {tmp_dir}
            --max_train_steps=10
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_overall_accuracy"], 0.10)

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_image_classification_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
            --model_name_or_path google/vit-base-patch16-224-in21k
            --dataset_name hf-internal-testing/cats_vs_dogs_sample
            --learning_rate 1e-4
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 1
            --max_train_steps 2
            --train_val_split 0.1
            --seed 42
            --output_dir {tmp_dir}
            --with_tracking
            --checkpointing_steps 1
        """.split()

        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # The base model scores a 25%
        self.assertGreaterEqual(result["eval_accuracy"], 0.6)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "step_1")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_classification_no_trainer")))
| 366
|
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent
def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False

    return failures
class lowercase__ :
'''simple docstring'''
def __init__( self, __magic_name__, __magic_name__ ) -> List[str]:
"""simple docstring"""
UpperCamelCase__ : Dict = title
UpperCamelCase__ : Tuple = doc_test_results['''time_spent'''].split(''',''' )[0]
UpperCamelCase__ : Optional[Any] = doc_test_results['''success''']
UpperCamelCase__ : str = doc_test_results['''failures''']
UpperCamelCase__ : str = self.n_success + self.n_failures
# Failures and success of the modeling tests
UpperCamelCase__ : List[Any] = doc_test_results
@property
def UpperCamelCase__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase__ : List[Any] = [self._time_spent]
UpperCamelCase__ : str = 0
for time in time_spent:
UpperCamelCase__ : List[Any] = time.split(''':''' )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(__magic_name__ ) == 1:
UpperCamelCase__ : List[Any] = [0, 0, time_parts[0]]
UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : int = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 3600 + minutes * 60 + seconds
UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : List[str] = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
return f"{int(__magic_name__ )}h{int(__magic_name__ )}m{int(__magic_name__ )}s"
@property
def UpperCamelCase__ ( self ) -> Dict:
"""simple docstring"""
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def UpperCamelCase__ ( self ) -> Dict:
"""simple docstring"""
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
def UpperCamelCase__ ( self ) -> Dict:
"""simple docstring"""
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
f" {self.time}."
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
def UpperCamelCase__ ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase__ : List[Any] = 40
UpperCamelCase__ : Tuple = {k: v['''failed'''] for k, v in doc_test_results.items() if isinstance(__magic_name__, __magic_name__ )}
UpperCamelCase__ : List[str] = ''''''
for category, failures in category_failures.items():
if len(__magic_name__ ) == 0:
continue
if report != "":
report += "\n\n"
report += f"*{category} failures*:".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(__magic_name__ )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"The following examples had failures:\n\n\n{report}\n",
},
}
@property
def UpperCamelCase__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase__ : str = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(blocks )
@staticmethod
def error_out( ) -> Union[str, Any]:
"""simple docstring"""
payload = [
{
'''type''': '''section''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''There was an issue running the tests.''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True},
'''url''': f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
]
print('''Sending the following payload''' )
print(json.dumps({'''blocks''': payload} ) )
client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''], text='''There was an issue running the tests.''', blocks=payload, )
def post( self ) -> Union[str, Any]:
"""simple docstring"""
print('''Sending the following payload''' )
print(json.dumps({'''blocks''': json.loads(self.payload )} ) )
text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else '''All tests passed.'''
self.thread_ts = client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''], blocks=self.payload, text=text, )
def get_reply_blocks( self, job_name, job_link, failures, text ) -> Optional[Any]:
"""simple docstring"""
failures_text = ''''''
for key, value in failures.items():
value = value[:200] + ''' [Truncated]''' if len(value ) > 250 else value
failures_text += f"*{key}*\n_{value}_\n\n"
title = job_name
content = {'''type''': '''section''', '''text''': {'''type''': '''mrkdwn''', '''text''': text}}
if job_link is not None:
content['''accessory'''] = {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''GitHub Action job''', '''emoji''': True},
'''url''': job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def post_reply( self ) -> Dict:
"""simple docstring"""
if self.thread_ts is None:
raise ValueError('''Can only post reply if a post has been made.''' )
job_link = self.doc_test_results.pop('''job_link''' )
self.doc_test_results.pop('''failures''' )
self.doc_test_results.pop('''success''' )
self.doc_test_results.pop('''time_spent''' )
sorted_dict = sorted(self.doc_test_results.items(), key=lambda t : t[0] )
for job, job_result in sorted_dict:
if len(job_result['''failures'''] ):
text = f"*Num failures* :{len(job_result['failed'] )} \n"
failures = job_result['''failures''']
blocks = self.get_reply_blocks(job, job_link, failures, text=text )
print('''Sending the following reply''' )
print(json.dumps({'''blocks''': blocks} ) )
client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''], text=f"Results for {job}", blocks=blocks, thread_ts=self.thread_ts['''ts'''], )
time.sleep(1 )
def get_job_links( ) -> Dict:
run_id = os.environ['''GITHUB_RUN_ID''']
url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
result = requests.get(url ).json()
jobs = {}
try:
jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
pages_to_iterate_over = math.ceil((result['''total_count'''] - 100) / 100 )
for i in range(pages_to_iterate_over ):
result = requests.get(url + f"&page={i + 2}" ).json()
jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
return jobs
except Exception as e:
print('''Unknown error, could not fetch links.''' , e )
return {}
def retrieve_artifact( name: str ) -> List[Any]:
_artifact = {}
if os.path.exists(name ):
files = os.listdir(name )
for file in files:
try:
with open(os.path.join(name , file ) , encoding='''utf-8''' ) as f:
_artifact[file.split('''.''' )[0]] = f.read()
except UnicodeDecodeError as e:
raise ValueError(f"Could not open {os.path.join(name , file )}." ) from e
return _artifact
def retrieve_available_artifacts( ) -> str:
class Artifact:
'''simple docstring'''
def __init__( self, name ) -> Tuple:
"""simple docstring"""
self.name = name
self.paths = []
def __str__( self ) -> Tuple:
"""simple docstring"""
return self.name
def add_path( self, path ) -> Union[str, Any]:
"""simple docstring"""
self.paths.append({'''name''': self.name, '''path''': path} )
_available_artifacts: Dict[str, Artifact] = {}
directories = filter(os.path.isdir , os.listdir() )
for directory in directories:
artifact_name = directory
if artifact_name not in _available_artifacts:
_available_artifacts[artifact_name] = Artifact(artifact_name )
_available_artifacts[artifact_name].add_path(directory )
return _available_artifacts
if __name__ == "__main__":
github_actions_job_links = get_job_links()
available_artifacts = retrieve_available_artifacts()
docs = collections.OrderedDict(
[
('*.py', 'API Examples'),
('*.md', 'MD Examples'),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
doc_test_results = {
v: {
'failed': [],
'failures': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
doc_test_results['job_link'] = github_actions_job_links.get('run_doctests')
artifact_path = available_artifacts['doc_tests_gpu_test_reports'].paths[0]
artifact = retrieve_artifact(artifact_path['name'])
if "stats" in artifact:
failed, success, time_spent = handle_test_results(artifact['stats'])
doc_test_results['failures'] = failed
doc_test_results['success'] = success
doc_test_results['time_spent'] = time_spent[1:-1] + ', '
all_failures = extract_first_line_failure(artifact['failures_short'])
for line in artifact["summary_short"].split('\n'):
if re.search('FAILED', line):
line = line.replace('FAILED ', '')
line = line.split()[0].replace('\n', '')
if "::" in line:
file_path, test = line.split('::')
else:
file_path, test = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
category = docs[file_regex]
doc_test_results[category]["failed"].append(test)
failure = all_failures[test] if test in all_failures else 'N/A'
doc_test_results[category]["failures"][test] = failure
break
message = Message('🤗 Results of the doc tests.', doc_test_results)
message.post()
message.post_reply()
| 247
| 0
|
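For context, the Slack calls in the snippet above come from slack_sdk's WebClient. A minimal sketch of posting a report and then threading replies under it, as post() and post_reply() do; the token and channel environment variable names are placeholders, not values taken from this script:

import os
from slack_sdk import WebClient

client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])  # assumed env var
response = client.chat_postMessage(
    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
    text="Doc test report",
    blocks=[{"type": "section", "text": {"type": "mrkdwn", "text": "*Results*"}}],
)
# Replies are anchored to the first message's timestamp, which is what
# post_reply() uses via self.thread_ts["ts"] above.
client.chat_postMessage(
    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
    text="Details for one job",
    thread_ts=response["ts"],
)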
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
'''simple docstring'''
self.parent = parent
self.batch_size = 13
self.seq_length = 7
self.is_training = True
self.use_input_mask = True
self.use_token_type_ids = True
self.use_labels = True
self.vocab_size = 99
self.hidden_size = 32
self.num_hidden_layers = 2
self.num_attention_heads = 4
self.intermediate_size = 37
self.hidden_act = '''gelu'''
self.hidden_dropout_prob = 0.1
self.attention_probs_dropout_prob = 0.1
self.max_position_embeddings = 512
self.type_vocab_size = 16
self.type_sequence_label_size = 2
self.initializer_range = 0.02
self.num_labels = 3
self.num_choices = 4
self.scope = None
def A ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase = None
if self.use_input_mask:
UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase = None
if self.use_token_type_ids:
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=lowercase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A ( self : Dict , lowercase : Optional[Any] , lowercase : List[Any] , lowercase : int , lowercase : Optional[Any] , lowercase : List[str] , lowercase : str , lowercase : Optional[int] ):
'''simple docstring'''
UpperCAmelCase = TFRoFormerModel(config=lowercase )
UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
UpperCAmelCase = [input_ids, input_mask]
UpperCAmelCase = model(lowercase )
UpperCAmelCase = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : str , lowercase : Any , lowercase : List[str] , lowercase : Union[str, Any] , lowercase : int , lowercase : List[Any] , lowercase : Optional[int] , lowercase : List[Any] ):
'''simple docstring'''
UpperCAmelCase = True
UpperCAmelCase = TFRoFormerForCausalLM(config=lowercase )
UpperCAmelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCAmelCase = model(lowercase )['''logits''']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def A ( self : List[Any] , lowercase : Optional[int] , lowercase : List[str] , lowercase : List[Any] , lowercase : Optional[int] , lowercase : Any , lowercase : str , lowercase : List[Any] ):
'''simple docstring'''
UpperCAmelCase = TFRoFormerForMaskedLM(config=lowercase )
UpperCAmelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCAmelCase = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A ( self : List[Any] , lowercase : Dict , lowercase : List[Any] , lowercase : Optional[int] , lowercase : Optional[int] , lowercase : Any , lowercase : Tuple , lowercase : int ):
'''simple docstring'''
UpperCAmelCase = self.num_labels
UpperCAmelCase = TFRoFormerForSequenceClassification(config=lowercase )
UpperCAmelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCAmelCase = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self : List[Any] , lowercase : Tuple , lowercase : List[str] , lowercase : Union[str, Any] , lowercase : Optional[Any] , lowercase : Tuple , lowercase : Optional[Any] , lowercase : str ):
'''simple docstring'''
UpperCAmelCase = self.num_choices
UpperCAmelCase = TFRoFormerForMultipleChoice(config=lowercase )
UpperCAmelCase = tf.tile(tf.expand_dims(lowercase , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase = tf.tile(tf.expand_dims(lowercase , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase = tf.tile(tf.expand_dims(lowercase , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
UpperCAmelCase = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A ( self : str , lowercase : List[str] , lowercase : Dict , lowercase : Dict , lowercase : Optional[int] , lowercase : Any , lowercase : Optional[int] , lowercase : Tuple ):
'''simple docstring'''
UpperCAmelCase = self.num_labels
UpperCAmelCase = TFRoFormerForTokenClassification(config=lowercase )
UpperCAmelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCAmelCase = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A ( self : Dict , lowercase : int , lowercase : Union[str, Any] , lowercase : Any , lowercase : List[str] , lowercase : Optional[int] , lowercase : Optional[int] , lowercase : str ):
'''simple docstring'''
UpperCAmelCase = TFRoFormerForQuestionAnswering(config=lowercase )
UpperCAmelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCAmelCase = model(lowercase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A ( self : Dict ):
'''simple docstring'''
UpperCAmelCase = self.prepare_config_and_inputs()
(
(
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) ,
) = config_and_inputs
UpperCAmelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class TFRoFormerModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
all_model_classes = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
pipeline_model_mapping = (
{
"""feature-extraction""": TFRoFormerModel,
"""fill-mask""": TFRoFormerForMaskedLM,
"""question-answering""": TFRoFormerForQuestionAnswering,
"""text-classification""": TFRoFormerForSequenceClassification,
"""text-generation""": TFRoFormerForCausalLM,
"""token-classification""": TFRoFormerForTokenClassification,
"""zero-shot""": TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
test_head_masking = False
test_onnx = False
def A ( self : Union[str, Any] , lowercase : Any , lowercase : Tuple , lowercase : Tuple , lowercase : Optional[int] , lowercase : Dict ):
'''simple docstring'''
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def A ( self : List[Any] ):
'''simple docstring'''
self.model_tester = TFRoFormerModelTester(self )
self.config_tester = ConfigTester(self , config_class=RoFormerConfig , hidden_size=37 )
def A ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def A ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def A ( self : int ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase )
def A ( self : str ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*lowercase )
def A ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowercase )
def A ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase )
def A ( self : Dict ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowercase )
def A ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase )
@slow
def A ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase = TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''' )
self.assertIsNotNone(lowercase )
@require_tf
class TFRoFormerModelIntegrationTest( unittest.TestCase ):
@slow
def A ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase = TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
UpperCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
UpperCAmelCase = model(lowercase )[0]
# TODO Replace vocab size
UpperCAmelCase = 50_000
UpperCAmelCase = [1, 6, vocab_size]
self.assertEqual(output.shape , lowercase )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
UpperCAmelCase = tf.constant(
[
[
[-0.1205_3341, -1.026_4901, 0.2922_1946],
[-1.513_3783, 0.19_7433, 0.1519_0607],
[-5.013_5403, -3.90_0256, -0.8403_8764],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , lowercase , atol=1E-4 )
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest( unittest.TestCase ):
__a : List[Any] = 1e-4
def A ( self : str ):
'''simple docstring'''
UpperCAmelCase = tf.constant([[4, 10]] )
UpperCAmelCase = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
UpperCAmelCase = emba(input_ids.shape )
UpperCAmelCase = tf.constant(
[[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
tf.debugging.assert_near(lowercase , lowercase , atol=self.tolerance )
def A ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase = tf.constant(
[
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
[0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
] )
UpperCAmelCase = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
emba([2, 16, 512] )
UpperCAmelCase = emba.weight[:3, :5]
tf.debugging.assert_near(lowercase , lowercase , atol=self.tolerance )
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest( unittest.TestCase ):
__a : Dict = 1e-4
def A ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
UpperCAmelCase = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
UpperCAmelCase = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
UpperCAmelCase = embed_positions([2, 16, 768] )[None, None, :, :]
UpperCAmelCase , UpperCAmelCase = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
lowercase , lowercase , lowercase )
UpperCAmelCase = tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
UpperCAmelCase = tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , lowercase , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , lowercase , atol=self.tolerance )
| 34
|
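The last test above exercises rotary position embeddings. As a rough reference, RoFormer-style rotation multiplies queries and keys by position-dependent sin/cos terms plus a half-rotated copy of themselves. A NumPy sketch under the interleaved-pair convention; the library's exact tensor layout may differ:

import numpy as np

def rotate_every_two(x):
    # (x1, x2, x3, x4, ...) -> (-x2, x1, -x4, x3, ...)
    x1, x2 = x[..., ::2], x[..., 1::2]
    return np.stack((-x2, x1), axis=-1).reshape(x.shape)

def apply_rotary(x, sin, cos):
    # sin/cos cover half the head dimension; repeat each entry twice
    sin = np.repeat(sin, 2, axis=-1)
    cos = np.repeat(cos, 2, axis=-1)
    return x * cos + rotate_every_two(x) * sin

At position 0, sin = 0 and cos = 1, so the vector passes through unchanged, which is consistent with the first expected row in the test constants above.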
"""simple docstring"""
def solution( n : int = 100 ) -> int:
"""simple docstring"""
sum_cubes = (n * (n + 1) // 2) ** 2
sum_squares = n * (n + 1) * (2 * n + 1) // 6
return sum_cubes - sum_squares
if __name__ == "__main__":
print(f'{solution() = }')
| 224
| 0
|
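A quick brute-force cross-check of the closed-form solution above (Project Euler problem 6: for n = 10 the difference between the square of the sum and the sum of the squares is 3025 - 385 = 2640). The helper name is illustrative:

def solution_brute(n: int = 100) -> int:
    # O(n) check: square of the sum minus the sum of the squares
    return sum(range(1, n + 1)) ** 2 + -sum(i * i for i in range(1, n + 1))

assert solution_brute(10) == 2640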
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase__ = {
"""configuration_whisper""": ["""WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """WhisperConfig""", """WhisperOnnxConfig"""],
"""feature_extraction_whisper""": ["""WhisperFeatureExtractor"""],
"""processing_whisper""": ["""WhisperProcessor"""],
"""tokenization_whisper""": ["""WhisperTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ["""WhisperTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
"""WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""WhisperForConditionalGeneration""",
"""WhisperModel""",
"""WhisperPreTrainedModel""",
"""WhisperForAudioClassification""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
"""TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFWhisperForConditionalGeneration""",
"""TFWhisperModel""",
"""TFWhisperPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
"""FlaxWhisperForConditionalGeneration""",
"""FlaxWhisperModel""",
"""FlaxWhisperPreTrainedModel""",
"""FlaxWhisperForAudioClassification""",
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
lowercase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 12
|
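The import scaffolding above defers heavy submodule imports until an attribute is first accessed. A minimal sketch of the same idea using PEP 562's module-level __getattr__; the names here are illustrative, not the transformers _LazyModule internals:

import importlib

_import_structure = {"math": ["sqrt"], "json": ["dumps"]}

def __getattr__(name):
    # Resolve the symbol lazily from whichever module declares it.
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")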
"""simple docstring"""
from typing import Any
def _snake_case ( input_list ):
if not input_list:
return []
result = [input_list.count(value ) for value in input_list]
y = max(result ) # Gets the maximum count in the input list.
# Gets values of modes
return sorted({input_list[i] for i, value in enumerate(result ) if value == y} )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 12
| 1
|
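Quick sanity checks for the mode helper above, using the restored names:

assert _snake_case([2, 2, 3]) == [2]        # single mode
assert _snake_case([1, 1, 2, 2]) == [1, 2]  # ties are all returned, sorted
assert _snake_case([]) == []                # empty input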
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''allenai/led-base-16384''': 1_63_84,
}
class LEDTokenizerFast( PreTrainedTokenizerFast ):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
slow_tokenizer_class = LEDTokenizer
model_input_names = ["""input_ids""", """attention_mask"""]
def __init__( self : List[str] , __lowerCamelCase : Dict=None , __lowerCamelCase : Any=None , __lowerCamelCase : int=None , __lowerCamelCase : Any="replace" , __lowerCamelCase : Union[str, Any]="<s>" , __lowerCamelCase : str="</s>" , __lowerCamelCase : Tuple="</s>" , __lowerCamelCase : List[Any]="<s>" , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : Any="<pad>" , __lowerCamelCase : Tuple="<mask>" , __lowerCamelCase : List[str]=False , __lowerCamelCase : List[Any]=True , **__lowerCamelCase : str , ):
super().__init__(
__lowerCamelCase , __lowerCamelCase , tokenizer_file=__lowerCamelCase , errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase , **__lowerCamelCase , )
UpperCamelCase :Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , __lowerCamelCase ) != add_prefix_space:
UpperCamelCase :Union[str, Any] = getattr(__lowerCamelCase , pre_tok_state.pop("""type""" ) )
UpperCamelCase :Optional[Any] = add_prefix_space
UpperCamelCase :Tuple = pre_tok_class(**__lowerCamelCase )
UpperCamelCase :Optional[Any] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
UpperCamelCase :str = """post_processor"""
UpperCamelCase :List[str] = getattr(self.backend_tokenizer , __lowerCamelCase , __lowerCamelCase )
if tokenizer_component_instance:
UpperCamelCase :Tuple = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
UpperCamelCase :int = tuple(state["""sep"""] )
if "cls" in state:
UpperCamelCase :Optional[int] = tuple(state["""cls"""] )
UpperCamelCase :Optional[Any] = False
if state.get("""add_prefix_space""" , __lowerCamelCase ) != add_prefix_space:
UpperCamelCase :Optional[Any] = add_prefix_space
UpperCamelCase :Union[str, Any] = True
if state.get("""trim_offsets""" , __lowerCamelCase ) != trim_offsets:
UpperCamelCase :Tuple = trim_offsets
UpperCamelCase :Tuple = True
if changes_to_apply:
UpperCamelCase :Tuple = getattr(__lowerCamelCase , state.pop("""type""" ) )
UpperCamelCase :int = component_class(**__lowerCamelCase )
setattr(self.backend_tokenizer , __lowerCamelCase , __lowerCamelCase )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def mask_token( self ):
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def mask_token( self , value ):
value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
self._mask_token = value
def _A ( self : Union[str, Any] , *__lowerCamelCase : List[Any] , **__lowerCamelCase : Optional[int] ):
UpperCamelCase :Optional[int] = kwargs.get("""is_split_into_words""" , __lowerCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"""to use it with pretokenized inputs.""" )
return super()._batch_encode_plus(*__lowerCamelCase , **__lowerCamelCase )
def _A ( self : Tuple , *__lowerCamelCase : Union[str, Any] , **__lowerCamelCase : str ):
UpperCamelCase :Tuple = kwargs.get("""is_split_into_words""" , __lowerCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"""to use it with pretokenized inputs.""" )
return super()._encode_plus(*__lowerCamelCase , **__lowerCamelCase )
def _A ( self : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ):
UpperCamelCase :List[str] = self._tokenizer.model.save(__lowerCamelCase , name=__lowerCamelCase )
return tuple(__lowerCamelCase )
def _A ( self : int , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any]=None ):
UpperCamelCase :List[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _A ( self : List[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ):
UpperCamelCase :Dict = [self.sep_token_id]
UpperCamelCase :List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _A ( self : Tuple , __lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , ):
UpperCamelCase :List[Any] = super()._pad(
encoded_inputs=__lowerCamelCase , max_length=__lowerCamelCase , padding_strategy=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , )
# Load from model defaults
if return_attention_mask is None:
UpperCamelCase :List[Any] = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
UpperCamelCase :str = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
UpperCamelCase :Union[str, Any] = len(encoded_inputs["""global_attention_mask"""] ) != len(__lowerCamelCase )
if needs_to_be_padded:
UpperCamelCase :Optional[int] = len(__lowerCamelCase ) - len(encoded_inputs["""global_attention_mask"""] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
UpperCamelCase :Optional[Any] = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
UpperCamelCase :Union[str, Any] = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return encoded_inputs
| 38
|
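Typical use of the padding behaviour above: LED expects a global_attention_mask alongside input_ids, and the override pads it with -1 so padded positions count as neither local nor global attention. A short usage sketch; the model id is the real allenai checkpoint named in this file, the rest is illustrative:

import torch
from transformers import LEDTokenizerFast

tok = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
enc = tok("a long document ...", return_tensors="pt")
# Give the first (<s>) token global attention, as LED commonly expects.
enc["global_attention_mask"] = torch.zeros_like(enc["input_ids"])
enc["global_attention_mask"][:, 0] = 1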
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class AutomaticSpeechRecognition( TaskTemplate ):
task: str = field(default="""automatic-speech-recognition""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
input_schema: ClassVar[Features] = Features({"""audio""": Audio()} )
label_schema: ClassVar[Features] = Features({"""transcription""": Value("""string""" )} )
audio_column: str = "audio"
transcription_column: str = "transcription"
def align_with_features( self , features ):
if self.audio_column not in features:
raise ValueError(F"""Column {self.audio_column} is not present in features.""" )
if not isinstance(features[self.audio_column] , Audio ):
raise ValueError(F"""Column {self.audio_column} is not an Audio type.""" )
task_template = copy.deepcopy(self )
input_schema = self.input_schema.copy()
input_schema['''audio'''] = features[self.audio_column]
task_template.__dict__['''input_schema'''] = input_schema
return task_template
@property
def column_mapping( self ):
return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 38
| 1
|
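A short usage sketch of the template above, mirroring how datasets' AutomaticSpeechRecognition task template is aligned with a dataset's features (sampling rate here is illustrative):

from datasets import Audio, Features, Value

features = Features({"audio": Audio(sampling_rate=16_000), "transcription": Value("string")})
template = AutomaticSpeechRecognition()  # default column names: audio / transcription
aligned = template.align_with_features(features)
print(aligned.column_mapping)  # {'audio': 'audio', 'transcription': 'transcription'}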
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ = pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""pt""")
# Using `do_sample=False` to force deterministic output
lowercase_ = text_generator("""This is a test""" , do_sample=lowerCAmelCase_)
self.assertEqual(
lowerCAmelCase_ , [
{
"""generated_text""": (
"""This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
""" oscope. FiliFili@@"""
)
}
] , )
lowercase_ = text_generator(["""This is a test""", """This is a second test"""])
self.assertEqual(
lowerCAmelCase_ , [
[
{
"""generated_text""": (
"""This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
""" oscope. FiliFili@@"""
)
}
],
[
{
"""generated_text""": (
"""This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"""
""" oscope. oscope. FiliFili@@"""
)
}
],
] , )
lowercase_ = text_generator("""This is a test""" , do_sample=lowerCAmelCase_ , num_return_sequences=2 , return_tensors=lowerCAmelCase_)
self.assertEqual(
lowerCAmelCase_ , [
{"""generated_token_ids""": ANY(lowerCAmelCase_)},
{"""generated_token_ids""": ANY(lowerCAmelCase_)},
] , )
lowercase_ = text_generator.model.config.eos_token_id
lowercase_ = """<pad>"""
lowercase_ = text_generator(
["""This is a test""", """This is a second test"""] , do_sample=lowerCAmelCase_ , num_return_sequences=2 , batch_size=2 , return_tensors=lowerCAmelCase_ , )
self.assertEqual(
lowerCAmelCase_ , [
[
{"""generated_token_ids""": ANY(lowerCAmelCase_)},
{"""generated_token_ids""": ANY(lowerCAmelCase_)},
],
[
{"""generated_token_ids""": ANY(lowerCAmelCase_)},
{"""generated_token_ids""": ANY(lowerCAmelCase_)},
],
] , )
@require_tf
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
lowercase_ = pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""tf""")
# Using `do_sample=False` to force deterministic output
lowercase_ = text_generator("""This is a test""" , do_sample=lowerCAmelCase_)
self.assertEqual(
lowerCAmelCase_ , [
{
"""generated_text""": (
"""This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
""" please,"""
)
}
] , )
lowercase_ = text_generator(["""This is a test""", """This is a second test"""] , do_sample=lowerCAmelCase_)
self.assertEqual(
lowerCAmelCase_ , [
[
{
"""generated_text""": (
"""This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
""" please,"""
)
}
],
[
{
"""generated_text""": (
"""This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"""
""" Cannes 閲閲Cannes Cannes Cannes 攵 please,"""
)
}
],
] , )
def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any]):
"""simple docstring"""
lowercase_ = TextGenerationPipeline(model=lowerCAmelCase_ , tokenizer=lowerCAmelCase_)
return text_generator, ["This is a test", "Another test"]
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = """Hello I believe in"""
lowercase_ = pipeline("""text-generation""" , model="""hf-internal-testing/tiny-random-gpt2""")
lowercase_ = text_generator(lowerCAmelCase_)
self.assertEqual(
lowerCAmelCase_ , [{"""generated_text""": """Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"""}] , )
lowercase_ = text_generator(lowerCAmelCase_ , stop_sequence=""" fe""")
self.assertEqual(lowerCAmelCase_ , [{"""generated_text""": """Hello I believe in fe"""}])
def _UpperCAmelCase ( self : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Any):
"""simple docstring"""
lowercase_ = text_generator.model
lowercase_ = text_generator.tokenizer
lowercase_ = text_generator("""This is a test""")
self.assertEqual(lowerCAmelCase_ , [{"""generated_text""": ANY(lowerCAmelCase_)}])
self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test"""))
lowercase_ = text_generator("""This is a test""" , return_full_text=lowerCAmelCase_)
self.assertEqual(lowerCAmelCase_ , [{"""generated_text""": ANY(lowerCAmelCase_)}])
self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""])
lowercase_ = pipeline(task="""text-generation""" , model=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , return_full_text=lowerCAmelCase_)
lowercase_ = text_generator("""This is a test""")
self.assertEqual(lowerCAmelCase_ , [{"""generated_text""": ANY(lowerCAmelCase_)}])
self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""])
lowercase_ = text_generator("""This is a test""" , return_full_text=lowerCAmelCase_)
self.assertEqual(lowerCAmelCase_ , [{"""generated_text""": ANY(lowerCAmelCase_)}])
self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test"""))
lowercase_ = text_generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=lowerCAmelCase_)
self.assertEqual(
lowerCAmelCase_ , [
[{"""generated_text""": ANY(lowerCAmelCase_)}, {"""generated_text""": ANY(lowerCAmelCase_)}],
[{"""generated_text""": ANY(lowerCAmelCase_)}, {"""generated_text""": ANY(lowerCAmelCase_)}],
] , )
if text_generator.tokenizer.pad_token is not None:
lowercase_ = text_generator(
["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=lowerCAmelCase_)
self.assertEqual(
lowerCAmelCase_ , [
[{"""generated_text""": ANY(lowerCAmelCase_)}, {"""generated_text""": ANY(lowerCAmelCase_)}],
[{"""generated_text""": ANY(lowerCAmelCase_)}, {"""generated_text""": ANY(lowerCAmelCase_)}],
] , )
with self.assertRaises(lowerCAmelCase_):
lowercase_ = text_generator("""test""" , return_full_text=lowerCAmelCase_ , return_text=lowerCAmelCase_)
with self.assertRaises(lowerCAmelCase_):
lowercase_ = text_generator("""test""" , return_full_text=lowerCAmelCase_ , return_tensors=lowerCAmelCase_)
with self.assertRaises(lowerCAmelCase_):
lowercase_ = text_generator("""test""" , return_text=lowerCAmelCase_ , return_tensors=lowerCAmelCase_)
# Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
lowercase_ = text_generator("""""")
self.assertEqual(lowerCAmelCase_ , [{"""generated_text""": ANY(lowerCAmelCase_)}])
else:
with self.assertRaises((ValueError, AssertionError)):
lowercase_ = text_generator("""""")
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ["""RwkvForCausalLM""", """XGLMForCausalLM""", """GPTNeoXForCausalLM"""]
if (
tokenizer.model_max_length < 1_0_0_0_0
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError)):
text_generator("""This is a test""" * 5_0_0 , max_new_tokens=2_0)
lowercase_ = text_generator("""This is a test""" * 5_0_0 , handle_long_generation="""hole""" , max_new_tokens=2_0)
# Hole strategy cannot work
with self.assertRaises(lowerCAmelCase_):
text_generator(
"""This is a test""" * 5_0_0 , handle_long_generation="""hole""" , max_new_tokens=tokenizer.model_max_length + 1_0 , )
@require_torch
@require_accelerate
@require_torch_gpu
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
import torch
# Classic `model_kwargs`
lowercase_ = pipeline(
model="""hf-internal-testing/tiny-random-bloom""" , model_kwargs={"""device_map""": """auto""", """torch_dtype""": torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0))
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa)
lowercase_ = pipe("""This is a test""")
self.assertEqual(
lowerCAmelCase_ , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
lowercase_ = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" , torch_dtype=torch.bfloataa)
self.assertEqual(pipe.model.device , torch.device(0))
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa)
lowercase_ = pipe("""This is a test""")
self.assertEqual(
lowerCAmelCase_ , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
lowercase_ = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""")
self.assertEqual(pipe.model.device , torch.device(0))
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa)
lowercase_ = pipe("""This is a test""")
self.assertEqual(
lowerCAmelCase_ , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
@require_torch
@require_torch_gpu
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
import torch
lowercase_ = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device=0 , torch_dtype=torch.floataa)
pipe("""This is a test""")
@require_torch
@require_accelerate
@require_torch_gpu
def _UpperCAmelCase ( self : str):
"""simple docstring"""
import torch
lowercase_ = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" , torch_dtype=torch.floataa)
pipe("""This is a test""" , do_sample=lowerCAmelCase_ , top_p=0.5)
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = """Hello world"""
lowercase_ = pipeline("""text-generation""" , model="""hf-internal-testing/tiny-random-gpt2""")
if text_generator.model.framework == "tf":
lowercase_ = logging.get_logger("""transformers.generation.tf_utils""")
else:
lowercase_ = logging.get_logger("""transformers.generation.utils""")
lowercase_ = """Both `max_new_tokens`"""  # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(lowerCAmelCase_) as cl:
lowercase_ = text_generator(lowerCAmelCase_ , max_length=1_0 , max_new_tokens=1)
self.assertIn(lowerCAmelCase_ , cl.out)
# The user only sets one -> no warning
with CaptureLogger(lowerCAmelCase_) as cl:
lowercase_ = text_generator(lowerCAmelCase_ , max_new_tokens=1)
self.assertNotIn(lowerCAmelCase_ , cl.out)
with CaptureLogger(lowerCAmelCase_) as cl:
lowercase_ = text_generator(lowerCAmelCase_ , max_length=1_0)
self.assertNotIn(lowerCAmelCase_ , cl.out)
| 358
|
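A minimal stand-alone run of the pipeline exercised by the tests above; the tiny model id is taken from the tests themselves, and with do_sample=False the output is deterministic but model-specific:

from transformers import pipeline

generator = pipeline("text-generation", model="sshleifer/tiny-ctrl")
out = generator("This is a test", do_sample=False)
print(out[0]["generated_text"])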
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict (config , input_ids , decoder_input_ids=None , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ) -> List[Any]:
'''simple docstring'''
if attention_mask is None:
lowercase_ = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
lowercase_ = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
lowercase_ = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
lowercase_ = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
lowercase_ = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class FlaxBlenderbotModelTester :
def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_labels=False , vocab_size=9_9 , hidden_size=1_6 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=4 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=3_2 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , initializer_range=0.02 , ):
"""simple docstring"""
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
self.initializer_range = initializer_range
def _UpperCAmelCase ( self : str):
"""simple docstring"""
lowercase_ = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size) , 3 , self.vocab_size)
lowercase_ = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa)) , -1)
lowercase_ = shift_tokens_right(lowerCAmelCase_ , 1 , 2)
lowercase_ = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=lowerCAmelCase_ , )
lowercase_ = prepare_blenderbot_inputs_dict(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
return config, inputs_dict
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ , lowercase_ = self.prepare_config_and_inputs()
return config, inputs_dict
def _UpperCAmelCase ( self : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : str):
"""simple docstring"""
lowercase_ = 2_0
lowercase_ = model_class_name(lowerCAmelCase_)
lowercase_ = model.encode(inputs_dict["""input_ids"""])
lowercase_ , lowercase_ = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
lowercase_ = model.init_cache(decoder_input_ids.shape[0] , lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""")
lowercase_ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowercase_ = model.decode(
decoder_input_ids[:, :-1] , lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , past_key_values=lowerCAmelCase_ , decoder_position_ids=lowerCAmelCase_ , )
lowercase_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""")
lowercase_ = model.decode(
decoder_input_ids[:, -1:] , lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowerCAmelCase_ , )
lowercase_ = model.decode(lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''')
def _UpperCAmelCase ( self : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[Any]):
"""simple docstring"""
lowercase_ = 2_0
lowercase_ = model_class_name(lowerCAmelCase_)
lowercase_ = model.encode(inputs_dict["""input_ids"""])
lowercase_ , lowercase_ = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
lowercase_ = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
] , axis=-1 , )
lowercase_ = model.init_cache(decoder_input_ids.shape[0] , lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowercase_ = model.decode(
decoder_input_ids[:, :-1] , lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , past_key_values=lowerCAmelCase_ , decoder_position_ids=lowerCAmelCase_ , )
lowercase_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""")
lowercase_ = model.decode(
decoder_input_ids[:, -1:] , lowerCAmelCase_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowerCAmelCase_ , decoder_position_ids=lowerCAmelCase_ , )
lowercase_ = model.decode(lowerCAmelCase_ , lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_)
lowercase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''')
@require_flax
class BlenderbotHeadTests( unittest.TestCase ):
vocab_size = 99
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
lowercase_ = np.array(
[
[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
[6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
[5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
[7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
[8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
[5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding
[6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
[5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
[4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
[2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
[2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
[4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
[7_0, 7_0, 5_0, 9, 2_8, 0, 2],
] , dtype=np.intaa , )
lowercase_ = input_ids.shape[0]
lowercase_ = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ , lowercase_ , lowercase_ = self._get_config_and_data()
lowercase_ = FlaxBlenderbotForConditionalGeneration(lowerCAmelCase_)
lowercase_ = lm_model(input_ids=lowerCAmelCase_)
lowercase_ = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["""logits"""].shape , lowerCAmelCase_)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , )
lowercase_ = FlaxBlenderbotForConditionalGeneration(lowerCAmelCase_)
lowercase_ = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa)
lowercase_ = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa)
lowercase_ = lm_model(input_ids=lowerCAmelCase_ , decoder_input_ids=lowerCAmelCase_)
lowercase_ = (*summary.shape, config.vocab_size)
self.assertEqual(outputs["""logits"""].shape , lowerCAmelCase_)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.intaa)
lowercase_ = shift_tokens_right(lowerCAmelCase_ , 1 , 2)
lowercase_ = np.equal(lowerCAmelCase_ , 1).astype(np.floataa).sum()
lowercase_ = np.equal(lowerCAmelCase_ , 1).astype(np.floataa).sum()
self.assertEqual(shifted.shape , input_ids.shape)
self.assertEqual(lowerCAmelCase_ , n_pad_before - 1)
self.assertTrue(np.equal(shifted[:, 0] , 2).all())
@require_flax
class FlaxBlenderbotModelTest( FlaxModelTesterMixin , unittest.TestCase , FlaxGenerationTesterMixin ):
is_encoder_decoder = True
all_model_classes = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
self.model_tester = FlaxBlenderbotModelTester(self)
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}

        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")

        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")
        sequences = model.generate(**model_inputs, **FASTER_GEN_KWARGS).sequences
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'
        generated_txt = tokenizer.batch_decode(sequences, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
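# --- Editor's addition: a minimal standalone sketch of the shift_tokens_right
# contract exercised above. The -100 sentinel handling mirrors the Flax BART-family
# helper; this is an illustration, not the library implementation.
def shift_tokens_right_demo(input_ids, pad_token_id, decoder_start_token_id):
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]  # every token moves one slot to the right
    shifted[:, 0] = decoder_start_token_id  # the decoder start token leads the sequence
    # any -100 sentinels produced upstream are replaced by the pad token
    return np.where(shifted == -100, pad_token_id, shifted)


assert (shift_tokens_right_demo(np.array([[71, 82, 18, 33, 2]]), 1, 2) == [[2, 71, 82, 18, 33]]).all()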
| 313
| 0
|
from __future__ import annotations
solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n = int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
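# Editor's note: solution.append(board) above stores a reference to the single mutable
# board, so every saved entry is zeroed out again by backtracking; appending
# copy.deepcopy(board) instead would preserve each placement. The count itself is
# still correct:
assert len(solution) == 92  # the 8-queens puzzle has exactly 92 distinct solutions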
| 9
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
__snake_case : Optional[int] = logging.get_logger(__name__)
__snake_case : Optional[int] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__snake_case : Optional[Any] = {
'vocab_file': {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'
),
}
}
__snake_case : Tuple = {
'junnyu/roformer_chinese_small': 1536,
'junnyu/roformer_chinese_base': 1536,
'junnyu/roformer_chinese_char_small': 512,
'junnyu/roformer_chinese_char_base': 512,
'junnyu/roformer_small_discriminator': 128,
'junnyu/roformer_small_generator': 128,
}
__snake_case : Optional[Any] = {
'junnyu/roformer_chinese_small': {'do_lower_case': True},
'junnyu/roformer_chinese_base': {'do_lower_case': True},
'junnyu/roformer_chinese_char_small': {'do_lower_case': True},
'junnyu/roformer_chinese_char_base': {'do_lower_case': True},
'junnyu/roformer_small_discriminator': {'do_lower_case': True},
'junnyu/roformer_small_generator': {'do_lower_case': True},
}
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = VOCAB_FILES_NAMES
__snake_case = PRETRAINED_VOCAB_FILES_MAP
__snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case = PRETRAINED_INIT_CONFIGURATION
__snake_case = RoFormerTokenizer
def __init__( self : str , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Any="[UNK]" , lowerCAmelCase_ : List[Any]="[SEP]" , lowerCAmelCase_ : Union[str, Any]="[PAD]" , lowerCAmelCase_ : Optional[Any]="[CLS]" , lowerCAmelCase_ : Dict="[MASK]" , lowerCAmelCase_ : Optional[int]=True , lowerCAmelCase_ : Tuple=None , **lowerCAmelCase_ : Tuple , ) -> List[str]:
'''simple docstring'''
super().__init__(
lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , tokenize_chinese_chars=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ , **lowerCAmelCase_ , )
A__ : Union[str, Any] =json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get("""lowercase""" , lowerCAmelCase_ ) != do_lower_case
or pre_tok_state.get("""strip_accents""" , lowerCAmelCase_ ) != strip_accents
):
A__ : int =getattr(lowerCAmelCase_ , pre_tok_state.pop("""type""" ) )
A__ : Union[str, Any] =do_lower_case
A__ : Tuple =strip_accents
A__ : int =pre_tok_class(**lowerCAmelCase_ )
A__ : List[Any] =do_lower_case
def __getstate__( self : Optional[int] ) -> str:
'''simple docstring'''
A__ : Any =self.__dict__.copy()
A__ : List[str] =BertPreTokenizer()
return state
def __setstate__( self : int , lowerCAmelCase_ : str ) -> str:
'''simple docstring'''
A__ : str =d
A__ : Optional[Any] =self.__dict__["""_tokenizer"""].get_vocab()
A__ : Any =PreTokenizer.custom(JiebaPreTokenizer(lowerCAmelCase_ ) )
def lowercase__ ( self : int , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str]=None ) -> Optional[Any]:
'''simple docstring'''
A__ : List[str] =[self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
A__ : int =[self.sep_token_id]
A__ : List[str] =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowercase__ ( self : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
A__ : List[Any] =self._tokenizer.model.save(lowerCAmelCase_ , name=lowerCAmelCase_ )
return tuple(lowerCAmelCase_ )
def lowercase__ ( self : Any , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : str=None , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : Tuple=False , **lowerCAmelCase_ : Tuple , ) -> List[Any]:
'''simple docstring'''
A__ : List[Any] =BertPreTokenizer()
return super().save_pretrained(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ )
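# Usage sketch (editor's addition; assumes the class above is exported as
# RoFormerTokenizerFast, as in `transformers`). The Jieba-based pre-tokenizer is not
# serializable, which is why saving and pickling above temporarily swap in a plain
# BertPreTokenizer before delegating to the parent implementation:
#
#   from transformers import RoFormerTokenizerFast
#   tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
#   print(tokenizer.tokenize("今天天气非常好。"))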
| 134
| 0
|
"""simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)


class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)
        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> None:
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
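# Usage sketch (editor's addition) for the `from_encoder_decoder_configs` classmethod
# above; ViT/BERT are just concrete example sub-configs:
#
#   from transformers import BertConfig, ViTConfig
#   config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(ViTConfig(), BertConfig())
#   assert config.decoder.is_decoder and config.decoder.add_cross_attention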
| 73
|
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=3 , lowerCamelCase__=3_2 , lowerCamelCase__=3 , lowerCamelCase__=1_0 , lowerCamelCase__=[1_0, 2_0, 3_0, 4_0] , lowerCamelCase__=[1, 1, 2, 1] , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__="relu" , lowerCamelCase__=3 , lowerCamelCase__=None , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = image_size
_lowerCamelCase = num_channels
_lowerCamelCase = embeddings_size
_lowerCamelCase = hidden_sizes
_lowerCamelCase = depths
_lowerCamelCase = is_training
_lowerCamelCase = use_labels
_lowerCamelCase = hidden_act
_lowerCamelCase = num_labels
_lowerCamelCase = scope
_lowerCamelCase = len(lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase = self.get_config()
return config, pixel_values
def snake_case__ ( self ):
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = FlaxRegNetModel(config=lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = self.num_labels
_lowerCamelCase = FlaxRegNetForImageClassification(config=lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case__ ( self ):
_lowerCamelCase = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase = config_and_inputs
_lowerCamelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_flax
class lowerCamelCase_( A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Union[str, Any] = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
lowercase__ : List[Any] = False
lowercase__ : Tuple = False
lowercase__ : Union[str, Any] = False
def snake_case__ ( self ):
_lowerCamelCase = FlaxRegNetModelTester(self )
_lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ )
def snake_case__ ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case__ ( self ):
return
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def snake_case__ ( self ):
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase = [*signature.parameters.keys()]
_lowerCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def snake_case__ ( self ):
def check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
_lowerCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCamelCase = self.model_tester.num_stages
self.assertEqual(len(lowerCamelCase__ ) , expected_num_stages + 1 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = model_class(lowerCamelCase__ )
@jax.jit
def model_jitted(lowerCamelCase__ , **lowerCamelCase__ ):
return model(pixel_values=lowerCamelCase__ , **lowerCamelCase__ )
with self.subTest('''JIT Enabled''' ):
_lowerCamelCase = model_jitted(**lowerCamelCase__ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
_lowerCamelCase = model_jitted(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) )
for jitted_output, output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCAmelCase_( ) -> Optional[Any]:
_lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_flax
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
@cached_property
def snake_case__ ( self ):
return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''' ) if is_vision_available() else None
@slow
def snake_case__ ( self ):
_lowerCamelCase = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''' )
_lowerCamelCase = self.default_image_processor
_lowerCamelCase = prepare_img()
_lowerCamelCase = image_processor(images=lowerCamelCase__ , return_tensors='''np''' )
_lowerCamelCase = model(**lowerCamelCase__ )
# verify the logits
_lowerCamelCase = (1, 1_0_0_0)
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
_lowerCamelCase = jnp.array([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1e-4 ) )
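# Minimal illustration (editor's addition) of the JIT-equivalence pattern the tests
# above rely on: a jitted call must match the eager call made under jax.disable_jit()
# in shape (and, for deterministic models, values):
#
#   fn = jax.jit(lambda x: x * 2)
#   x = jnp.ones((2, 3))
#   with jax.disable_jit():
#       eager = fn(x)
#   assert fn(x).shape == eager.shape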
| 73
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_clap': [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapAudioConfig',
'ClapConfig',
'ClapTextConfig',
],
'processing_clap': ['ClapProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_clap'] = [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapModel',
'ClapPreTrainedModel',
'ClapTextModel',
'ClapTextModelWithProjection',
'ClapAudioModel',
'ClapAudioModelWithProjection',
]
    _import_structure['feature_extraction_clap'] = ['ClapFeatureExtractor']
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
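# Editor's addition: a miniature sketch of the lazy-import machinery used above (an
# illustration of the idea, not the real `transformers.utils._LazyModule`). Symbols
# declared in the import structure are only materialized on first attribute access,
# which keeps top-level imports cheap even when torch-backed symbols are registered:
import importlib


class _TinyLazyModule:
    def __init__(self, name, import_structure):
        self._name = name
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, attr):
        submodule = importlib.import_module("." + self._symbol_to_module[attr], self._name)
        return getattr(submodule, attr)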
| 98
|
"""simple docstring"""
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True  # name reconstructed; flags native torch.cuda.amp support
from torch.cuda.amp import autocast
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    verbose_logging: Optional[bool] = field(
        default=False,
        metadata={"help": "Whether to log verbose messages or not."},
    )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."}
    )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."}
    )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"help": "Decay of gumbel temperature during training."}
    )
def configure_logger(model_args, training_args):
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    dataset_name: str = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file",
        metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}
    )
@dataclass
class DataCollatorForWavaVecaPretraining:
    model: WavaVecaForPreTraining
    feature_extractor: WavaVecaFeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # reformat list to dict and set to pytorch format
        batch = self.feature_extractor.pad(
            features,
            max_length=self.max_length,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
        batch_size = batch["input_values"].shape[0]

        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long
            )
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device
            )
            # these two operations make sure that all values
            # before the output lengths indices are attended to
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()

        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length),
            self.model.config.mask_time_prob,
            self.model.config.mask_time_length,
            attention_mask=attention_mask,
            min_masks=2,
        )
        return batch
class WavaVecaPreTrainer(Trainer):
    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )

        return loss.detach()
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]",
            cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split="validation", cache_dir=model_args.cache_dir
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}",
            cache_dir=model_args.cache_dir,
        )

    # only normalized-inputs-training is supported
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets["train"].column_names,
    )

    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = WavaVecaConfig.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        gradient_checkpointing=training_args.gradient_checkpointing,
    )

    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'"
        )

    model = WavaVecaForPreTraining(config)

    data_collator = DataCollatorForWavaVecaPretraining(model=model, feature_extractor=feature_extractor)

    trainer = WavaVecaPreTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        train_dataset=vectorized_datasets["train"],
        eval_dataset=vectorized_datasets["validation"],
        tokenizer=feature_extractor,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()
if __name__ == "__main__":
main()
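# Example invocation (editor's addition; the dataset and model names are illustrative,
# the flags come from the dataclasses and TrainingArguments above):
#
#   python run_wav2vec2_pretraining.py \
#       --model_name_or_path patrickvonplaten/wav2vec2-base-v2 \
#       --dataset_name librispeech_asr \
#       --dataset_config_name clean \
#       --train_split_name train.100 \
#       --max_duration_in_seconds 20.0 \
#       --output_dir ./wav2vec2-pretrained \
#       --do_train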
| 247
| 0
|
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
a : Dict = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
a : List[Any] = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__ ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ : Tuple = AudioClassificationPipeline(model=__magic_name__, feature_extractor=__magic_name__ )
# test with a raw waveform
UpperCamelCase__ : List[str] = np.zeros((34000,) )
UpperCamelCase__ : Optional[Any] = np.zeros((14000,) )
return audio_classifier, [audioa, audio]
def UpperCamelCase__ ( self, __magic_name__, __magic_name__ ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ ,UpperCamelCase__ : Optional[Any] = examples
UpperCamelCase__ : int = audio_classifier(__magic_name__ )
# by default a model is initialized with num_labels=2
self.assertEqual(
__magic_name__, [
{'''score''': ANY(__magic_name__ ), '''label''': ANY(__magic_name__ )},
{'''score''': ANY(__magic_name__ ), '''label''': ANY(__magic_name__ )},
], )
UpperCamelCase__ : Tuple = audio_classifier(__magic_name__, top_k=1 )
self.assertEqual(
__magic_name__, [
{'''score''': ANY(__magic_name__ ), '''label''': ANY(__magic_name__ )},
], )
self.run_torchaudio(__magic_name__ )
@require_torchaudio
def UpperCamelCase__ ( self, __magic_name__ ) -> str:
"""simple docstring"""
import datasets
# test with a local file
UpperCamelCase__ : List[Any] = datasets.load_dataset('''hf-internal-testing/librispeech_asr_dummy''', '''clean''', split='''validation''' )
UpperCamelCase__ : Tuple = dataset[0]['''audio''']['''array''']
UpperCamelCase__ : Any = audio_classifier(__magic_name__ )
self.assertEqual(
__magic_name__, [
{'''score''': ANY(__magic_name__ ), '''label''': ANY(__magic_name__ )},
{'''score''': ANY(__magic_name__ ), '''label''': ANY(__magic_name__ )},
], )
@require_torch
def UpperCamelCase__ ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase__ : List[Any] = '''anton-l/wav2vec2-random-tiny-classifier'''
UpperCamelCase__ : List[str] = pipeline('''audio-classification''', model=__magic_name__ )
UpperCamelCase__ : Union[str, Any] = np.ones((8000,) )
UpperCamelCase__ : Optional[int] = audio_classifier(__magic_name__, top_k=4 )
        EXPECTED_OUTPUT = [
{'''score''': 0.0842, '''label''': '''no'''},
{'''score''': 0.0838, '''label''': '''up'''},
{'''score''': 0.0837, '''label''': '''go'''},
{'''score''': 0.0834, '''label''': '''right'''},
]
        EXPECTED_OUTPUT_PT_2 = [
{'''score''': 0.0845, '''label''': '''stop'''},
{'''score''': 0.0844, '''label''': '''on'''},
{'''score''': 0.0841, '''label''': '''right'''},
{'''score''': 0.0834, '''label''': '''left'''},
]
self.assertIn(nested_simplify(__magic_name__, decimals=4 ), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
UpperCamelCase__ : List[str] = {'''array''': np.ones((8000,) ), '''sampling_rate''': audio_classifier.feature_extractor.sampling_rate}
UpperCamelCase__ : Optional[int] = audio_classifier(__magic_name__, top_k=4 )
self.assertIn(nested_simplify(__magic_name__, decimals=4 ), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def UpperCamelCase__ ( self ) -> Tuple:
"""simple docstring"""
import datasets
UpperCamelCase__ : Optional[Any] = '''superb/wav2vec2-base-superb-ks'''
UpperCamelCase__ : str = pipeline('''audio-classification''', model=__magic_name__ )
UpperCamelCase__ : Any = datasets.load_dataset('''anton-l/superb_dummy''', '''ks''', split='''test''' )
        UpperCamelCase__ : str = np.array(dataset[3]['''speech'''], dtype=np.float32 )
UpperCamelCase__ : List[str] = audio_classifier(__magic_name__, top_k=4 )
self.assertEqual(
nested_simplify(__magic_name__, decimals=3 ), [
{'''score''': 0.981, '''label''': '''go'''},
{'''score''': 0.007, '''label''': '''up'''},
{'''score''': 0.006, '''label''': '''_unknown_'''},
{'''score''': 0.001, '''label''': '''down'''},
], )
@require_tf
@unittest.skip('''Audio classification is not implemented for TF''' )
def UpperCamelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
pass
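# Quick usage sketch (editor's addition) of the pipeline exercised above:
#
#   from transformers import pipeline
#   classifier = pipeline("audio-classification", model="superb/wav2vec2-base-superb-ks")
#   preds = classifier(raw_waveform, top_k=4)  # raw_waveform: 1-D float32 numpy array
#   # -> [{"score": ..., "label": ...}, ...] sorted by descending score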
| 247
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
'configuration_audio_spectrogram_transformer': [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ASTConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_audio_spectrogram_transformer'] = [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ASTForAudioClassification',
'ASTModel',
'ASTPreTrainedModel',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_audio_spectrogram_transformer'] = ['ASTFeatureExtractor']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 247
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_whisper_fast'] = ['WhisperTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_whisper'] = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_whisper'] = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_whisper'] = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 12
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vision_encoder_decoder'] = ['VisionEncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_vision_encoder_decoder'] = ['TFVisionEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_vision_encoder_decoder'] = ['FlaxVisionEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 12
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_mbart': ['MBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MBartConfig', 'MBartOnnxConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mbart'] = ['MBartTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mbart_fast'] = ['MBartTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mbart'] = [
'MBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'MBartForCausalLM',
'MBartForConditionalGeneration',
'MBartForQuestionAnswering',
'MBartForSequenceClassification',
'MBartModel',
'MBartPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_mbart'] = [
'TFMBartForConditionalGeneration',
'TFMBartModel',
'TFMBartPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_mbart'] = [
'FlaxMBartForConditionalGeneration',
'FlaxMBartForQuestionAnswering',
'FlaxMBartForSequenceClassification',
'FlaxMBartModel',
'FlaxMBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 360
|
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

# maps onnxruntime tensor type strings to numpy dtypes
ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return
        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id: Union[str, Path],
        use_auth_token: Optional[str] = None,
        revision: Optional[str] = None,
        force_download: bool = False,
        cache_dir: Optional[str] = None,
        file_name: Optional[str] = None,
        provider: Optional[str] = None,
        sess_options: Optional["ort.SessionOptions"] = None,
        **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id: Union[str, Path],
        force_download: bool = True,
        use_auth_token: Optional[str] = None,
        cache_dir: Optional[str] = None,
        **model_kwargs,
    ):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")
        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
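# Usage sketch (editor's addition; the repo id is illustrative). Inputs are passed as
# keyword arguments and converted to numpy arrays before being fed to onnxruntime:
#
#   model = OnnxRuntimeModel.from_pretrained("someorg/some-onnx-model", provider="CPUExecutionProvider")
#   outputs = model(input_ids=np.ones((1, 8), dtype=np.int64))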
| 141
| 0
|
'''simple docstring'''
def infix_2_postfix(infix):
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack

        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix):
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix


if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
| 198
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
a__ : Any = logging.get_logger(__name__)
a__ : Dict = {
'''openai/imagegpt-small''': '''''',
'''openai/imagegpt-medium''': '''''',
'''openai/imagegpt-large''': '''''',
}
class ImageGPTConfig(PretrainedConfig):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=512 + 1,  # add one for start-of-sequence token
        n_positions=32 * 32,
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        tie_word_embeddings=False,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)


class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_height: int = 32,
        image_width: int = 32,
    ) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
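# Usage sketch (editor's addition): `attribute_map` above aliases the canonical config
# names onto the GPT-2-style fields, so both spellings resolve to the same value:
#
#   config = ImageGPTConfig()
#   assert config.hidden_size == config.n_embd == 512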
| 313
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mega'] = [
'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegaForCausalLM',
'MegaForMaskedLM',
'MegaForMultipleChoice',
'MegaForQuestionAnswering',
'MegaForSequenceClassification',
'MegaForTokenClassification',
'MegaModel',
'MegaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 355
|
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class a ( _A ):
'''simple docstring'''
lowerCAmelCase : str = ['input_values', 'padding_mask']
def __init__( self : Optional[Any] , __snake_case : int = 1 , __snake_case : int = 2_40_00 , __snake_case : float = 0.0 , __snake_case : float = None , __snake_case : float = None , **__snake_case : Dict , ):
super().__init__(feature_size=__snake_case , sampling_rate=__snake_case , padding_value=__snake_case , **__snake_case )
UpperCAmelCase_ = chunk_length_s
UpperCAmelCase_ = overlap
@property
def lowerCamelCase_ ( self : List[str] ):
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def lowerCamelCase_ ( self : List[str] ):
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
    def __call__(self, raw_audio: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], padding: Optional[Union[bool, str, PaddingStrategy]] = None, truncation: Optional[bool] = False, max_length: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, sampling_rate: Optional[int] = None) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if padding and truncation:
            raise ValueError("Both padding and truncation were set. Make sure you only set one.")
        elif padding is None:
            # by default let's pad the inputs
            padding = True

        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]

        # verify inputs are valid
        for idx, example in enumerate(raw_audio):
            if example.ndim > 2:
                raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}")
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels")
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels")

        padded_inputs = None
        input_values = BatchFeature({"input_values": raw_audio})
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = "max_length"
            else:
                padded_inputs = input_values

        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values,
                max_length=max_length,
                truncation=truncation,
                padding=padding,
                return_attention_mask=padding,
            )
            if padding:
                padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")

        input_values = []
        for example in padded_inputs.pop("input_values"):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T)

        padded_inputs["input_values"] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
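# Minimal usage sketch (illustrative, using the names restored above; no
# chunking configured, so this is plain pad-to-longest behaviour):
#   extractor = EncodecFeatureExtractor(feature_size=1, sampling_rate=24000)
#   audio = [np.zeros(24000, dtype=np.float32), np.zeros(12000, dtype=np.float32)]
#   batch = extractor(audio, sampling_rate=24000)
#   batch["input_values"][0].shape  # (1, 24000): (channels, padded length)
#   batch["padding_mask"][1]        # zeros over the 12000 padded samples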
| 177
| 0
|
# Project Euler problem 144: count the reflections of a laser beam inside the
# ellipse 4x^2 + y^2 = 100, entering through the gap -0.01 <= x <= 0.01 at the top.
from math import isclose, sqrt


def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    # gradient of the normal to the ellipse at the point of incidence
    normal_gradient = point_y / 4 / point_x
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (1 + normal_gradient * normal_gradient)
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)) / (2 * quadratic_term)
    x_plus = (-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_minus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    incoming_gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, incoming_gradient = next_point(point_x, point_y, incoming_gradient)
        num_reflections += 1

    return num_reflections


if __name__ == "__main__":
    print(f"{solution() = }")
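# Geometric background for next_point(): if the mirror normal has gradient
# m = tan(theta), then s2 = 2m / (1 + m^2) = sin(2*theta) and
# c2 = (1 - m^2) / (1 + m^2) = cos(2*theta), so
# (s2 - c2 * i) / (c2 + s2 * i) = tan(2*theta - phi) for i = tan(phi):
# the incoming direction reflected across the normal. For reference, the
# published Project Euler answer for the default starting beam is 354, so
# solution() is expected to return 354.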
| 73
|
from typing import Optional
from urllib.parse import quote

import huggingface_hub as hfh
from packaging import version


def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
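# Usage sketch: with the default revision ("main") this resolves to
#   hf_hub_url("user/my-dataset", "data/train.csv")
#   -> "https://huggingface.co/datasets/user/my-dataset/resolve/main/data/train.csv"
# (URL shape shown for illustration; it is produced by huggingface_hub, not here.)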
| 73
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}


class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=0, eos_token_id=2, max_xpath_tag_unit_embeddings=256, max_xpath_subs_unit_embeddings=1_024, tag_pad_id=216, subs_pad_id=1_001, xpath_unit_hidden_size=32, max_depth=50, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
| 86
|
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class _UpperCAmelCase :
def __init__( self : Dict , _lowercase : int , _lowercase : List[str]=13 , _lowercase : Dict=32 , _lowercase : Any=2 , _lowercase : Optional[int]=3 , _lowercase : Optional[Any]=16 , _lowercase : Optional[int]=[1, 2, 1] , _lowercase : int=[2, 2, 4] , _lowercase : Optional[Any]=2 , _lowercase : Union[str, Any]=2.0 , _lowercase : Any=True , _lowercase : Optional[Any]=0.0 , _lowercase : Dict=0.0 , _lowercase : Dict=0.1 , _lowercase : str="gelu" , _lowercase : List[Any]=False , _lowercase : List[Any]=True , _lowercase : Optional[Any]=0.02 , _lowercase : str=1E-5 , _lowercase : str=True , _lowercase : Any=None , _lowercase : Tuple=True , _lowercase : Any=10 , _lowercase : int=8 , _lowercase : Optional[Any]=["stage1", "stage2", "stage3"] , _lowercase : Optional[Any]=[1, 2, 3] , ):
__UpperCAmelCase = parent
__UpperCAmelCase = batch_size
__UpperCAmelCase = image_size
__UpperCAmelCase = patch_size
__UpperCAmelCase = num_channels
__UpperCAmelCase = embed_dim
__UpperCAmelCase = depths
__UpperCAmelCase = num_heads
__UpperCAmelCase = window_size
__UpperCAmelCase = mlp_ratio
__UpperCAmelCase = qkv_bias
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = drop_path_rate
__UpperCAmelCase = hidden_act
__UpperCAmelCase = use_absolute_embeddings
__UpperCAmelCase = patch_norm
__UpperCAmelCase = layer_norm_eps
__UpperCAmelCase = initializer_range
__UpperCAmelCase = is_training
__UpperCAmelCase = scope
__UpperCAmelCase = use_labels
__UpperCAmelCase = type_sequence_label_size
__UpperCAmelCase = encoder_stride
__UpperCAmelCase = out_features
__UpperCAmelCase = out_indices
def a ( self : int ):
__UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase = None
if self.use_labels:
__UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def a ( self : Dict ):
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def a ( self : List[Any] , _lowercase : Union[str, Any] , _lowercase : str , _lowercase : int ):
__UpperCAmelCase = MaskFormerSwinModel(config=_lowercase )
model.to(_lowercase )
model.eval()
__UpperCAmelCase = model(_lowercase )
__UpperCAmelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__UpperCAmelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def a ( self : int , _lowercase : Optional[Any] , _lowercase : Any , _lowercase : Dict ):
__UpperCAmelCase = MaskFormerSwinBackbone(config=_lowercase )
model.to(_lowercase )
model.eval()
__UpperCAmelCase = model(_lowercase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(_lowercase ):
__UpperCAmelCase = ['''stem''']
__UpperCAmelCase = MaskFormerSwinBackbone(config=_lowercase )
def a ( self : Optional[int] ):
__UpperCAmelCase = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = config_and_inputs
__UpperCAmelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
a__ : List[Any] = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
a__ : Optional[int] = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
a__ : List[str] = False
a__ : int = False
a__ : str = False
a__ : str = False
a__ : Any = False
def a ( self : Optional[Any] ):
__UpperCAmelCase = MaskFormerSwinModelTester(self )
__UpperCAmelCase = ConfigTester(self , config_class=_lowercase , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
'''`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'''
''' `nn.DataParallel`'''
) )
def a ( self : int ):
pass
def a ( self : Dict ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a ( self : str ):
return
def a ( self : Optional[Any] ):
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
def a ( self : Optional[int] ):
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_lowercase )
@unittest.skip('''Swin does not use inputs_embeds''' )
def a ( self : List[Any] ):
pass
@unittest.skip('''Swin does not support feedforward chunking''' )
def a ( self : str ):
pass
def a ( self : Union[str, Any] ):
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase = model_class(_lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowercase , nn.Linear ) )
def a ( self : Union[str, Any] ):
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase = model_class(_lowercase )
__UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase = [*signature.parameters.keys()]
__UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowercase )
@unittest.skip(reason='''MaskFormerSwin is only used as backbone and doesn\'t support output_attentions''' )
def a ( self : Optional[Any] ):
pass
@unittest.skip(reason='''MaskFormerSwin is only used as an internal backbone''' )
def a ( self : Optional[Any] ):
pass
def a ( self : List[Any] , _lowercase : Union[str, Any] , _lowercase : List[str] , _lowercase : Dict , _lowercase : Tuple ):
__UpperCAmelCase = model_class(_lowercase )
model.to(_lowercase )
model.eval()
with torch.no_grad():
__UpperCAmelCase = model(**self._prepare_for_class(_lowercase , _lowercase ) )
__UpperCAmelCase = outputs.hidden_states
__UpperCAmelCase = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_lowercase ) , _lowercase )
# Swin has a different seq_length
__UpperCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__UpperCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def a ( self : str ):
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__UpperCAmelCase = True
self.check_hidden_states_output(_lowercase , _lowercase , _lowercase , _lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase = True
self.check_hidden_states_output(_lowercase , _lowercase , _lowercase , _lowercase )
def a ( self : Optional[Any] ):
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase = 3
__UpperCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__UpperCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__UpperCAmelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__UpperCAmelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__UpperCAmelCase = True
self.check_hidden_states_output(_lowercase , _lowercase , _lowercase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase = True
self.check_hidden_states_output(_lowercase , _lowercase , _lowercase , (padded_height, padded_width) )
@unittest.skip(reason='''MaskFormerSwin doesn\'t have pretrained checkpoints''' )
def a ( self : Any ):
pass
@unittest.skip(reason='''This will be fixed once MaskFormerSwin is replaced by native Swin''' )
def a ( self : str ):
pass
@unittest.skip(reason='''This will be fixed once MaskFormerSwin is replaced by native Swin''' )
def a ( self : Tuple ):
pass
def a ( self : Tuple ):
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(_lowercase : List[str] ):
__UpperCAmelCase = 0
return t
def check_equivalence(_lowercase : List[Any] , _lowercase : Any , _lowercase : str , _lowercase : List[str]={} ):
with torch.no_grad():
__UpperCAmelCase = model(**_lowercase , return_dict=_lowercase , **_lowercase )
__UpperCAmelCase = model(**_lowercase , return_dict=_lowercase , **_lowercase ).to_tuple()
def recursive_check(_lowercase : Dict , _lowercase : Optional[Any] ):
if isinstance(_lowercase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_lowercase , _lowercase ):
recursive_check(_lowercase , _lowercase )
elif isinstance(_lowercase , _lowercase ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(_lowercase , _lowercase )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(_lowercase ) , set_nan_tensor_to_zero(_lowercase ) , atol=1E-5 ) , msg=(
'''Tuple and dict output are not equal. Difference:'''
F''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'''
F''' {torch.isnan(_lowercase ).any()} and `inf`: {torch.isinf(_lowercase )}. Dict has'''
F''' `nan`: {torch.isnan(_lowercase ).any()} and `inf`: {torch.isinf(_lowercase )}.'''
) , )
recursive_check(_lowercase , _lowercase )
for model_class in self.all_model_classes:
__UpperCAmelCase = model_class(_lowercase )
model.to(_lowercase )
model.eval()
__UpperCAmelCase = self._prepare_for_class(_lowercase , _lowercase )
__UpperCAmelCase = self._prepare_for_class(_lowercase , _lowercase )
check_equivalence(_lowercase , _lowercase , _lowercase )
__UpperCAmelCase = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
__UpperCAmelCase = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
check_equivalence(_lowercase , _lowercase , _lowercase )
__UpperCAmelCase = self._prepare_for_class(_lowercase , _lowercase )
__UpperCAmelCase = self._prepare_for_class(_lowercase , _lowercase )
check_equivalence(_lowercase , _lowercase , _lowercase , {'''output_hidden_states''': True} )
__UpperCAmelCase = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
__UpperCAmelCase = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
check_equivalence(_lowercase , _lowercase , _lowercase , {'''output_hidden_states''': True} )
@require_torch
class _UpperCAmelCase ( unittest.TestCase , _lowerCAmelCase ):
a__ : Optional[Any] = (MaskFormerSwinBackbone,) if is_torch_available() else ()
a__ : List[str] = MaskFormerSwinConfig
def a ( self : List[str] ):
__UpperCAmelCase = MaskFormerSwinModelTester(self )
def a ( self : List[Any] ):
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase = inputs_dict['''pixel_values'''].shape[0]
for backbone_class in self.all_model_classes:
__UpperCAmelCase = backbone_class(_lowercase )
backbone.to(_lowercase )
backbone.eval()
__UpperCAmelCase = backbone(**_lowercase )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , _lowercase )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
__UpperCAmelCase = backbone(**_lowercase , output_hidden_states=_lowercase )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
__UpperCAmelCase = backbone(**_lowercase , output_attentions=_lowercase )
self.assertIsNotNone(outputs.attentions )
| 86
| 1
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = "\nHuman: <<task>>\n\nAssistant: "

DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}


def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
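# Usage sketch: download_prompt(None, "my-agent") fetches
# "run_prompt_template.txt" from the huggingface-tools/default-prompts dataset
# repo, while download_prompt("Translate this text", "my-agent") returns the
# string unchanged, because it contains whitespace and is therefore treated as
# an inline prompt rather than a repo id.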
| 247
|
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=50_400, n_positions=2_048, n_ctx=2_048, n_embd=4_096, n_layer=28, n_head=16, rotary_dim=64, n_inner=None, activation_function="gelu_new", resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=50_256, eos_token_id=50_256, tie_word_embeddings=False, **kwargs):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(self, config: PretrainedConfig, task: str = "default", patching_specs: List[PatchingSpec] = None, use_past: bool = False):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the inputs in the way they appear in forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
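# Worked example for the dummy past_key_values above (illustrative numbers):
# with batch=2, seq_length=8, n_head=16 and hidden_size=4096, each past tensor
# has shape (2, 16, 8 + 2, 4096 // 16) = (2, 16, 10, 256), and the attention
# mask is extended by 10 ones so it covers past + current tokens.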
| 247
| 1
|
'''simple docstring'''
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)


class RagPyTorchDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.process_group = None

    def init_retrieval(self, distributed_port: int):
        logger.info("initializing retrieval")

        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()

        # all processes wait until the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)

    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname

    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int):
        # single GPU training
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
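# Shape sketch for retrieve() in the distributed case (illustrative numbers):
# with world_size=4 ranks each holding question_hidden_states of shape (8, 768)
# and n_docs=5, rank 0 gathers a (32, 768) batch, runs _main_retrieve once, and
# scatters per-rank chunks back: doc_ids (8, 5) int64 and retrieved_doc_embeds
# (8, 5, 768) float32.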
| 135
|
'''simple docstring'''
def reverse_words(input_str: str) -> str:
    """
    Reverses the order of words in a given string.

    >>> reverse_words("I love Python")
    'Python love I'
    """
    return " ".join(input_str.split()[::-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 135
| 1
|
import unittest
from transformers import (
    MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
    pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)

    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator("Something there", num_return_sequences=num_return_sequences, num_beams=num_return_sequences)
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
| 7
|
'''simple docstring'''
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer(CLIPTokenizer):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)

        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}; keep placeholder tokens independent."
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output

        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load),
            *args,
            **kwargs,
        )
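# Usage sketch (names are from the class above): registering a multi-vector
# placeholder and encoding text that contains it.
#   tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
#   tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
#   # "<cat-toy>" is expanded to "<cat-toy>_0 <cat-toy>_1 <cat-toy>_2 <cat-toy>_3"
#   ids = tokenizer.encode("a photo of <cat-toy>")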
| 141
| 0
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __A ( self ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __A ( self ) -> str:
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = (32, 32)
SCREAMING_SNAKE_CASE = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
return image
@property
def __A ( self ) -> Any:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=lowerCAmelCase__ , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
return model
@property
def __A ( self ) -> Optional[int]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def __A ( self ) -> Dict:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='gelu' , projection_dim=512 , )
return CLIPTextModel(lowerCAmelCase__ )
def __A ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.dummy_cond_unet_upscale
SCREAMING_SNAKE_CASE = DDPMScheduler()
SCREAMING_SNAKE_CASE = DDIMScheduler(prediction_type='v_prediction' )
SCREAMING_SNAKE_CASE = self.dummy_vae
SCREAMING_SNAKE_CASE = self.dummy_text_encoder
SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE = Image.fromarray(np.uint8(lowerCAmelCase__ ) ).convert('RGB' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE = StableDiffusionUpscalePipeline(
unet=lowerCAmelCase__ , low_res_scheduler=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , max_noise_level=350 , )
SCREAMING_SNAKE_CASE = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = 'A painting of a squirrel eating a burger'
SCREAMING_SNAKE_CASE = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
SCREAMING_SNAKE_CASE = sd_pipe(
[prompt] , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
SCREAMING_SNAKE_CASE = output.images
SCREAMING_SNAKE_CASE = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
SCREAMING_SNAKE_CASE = sd_pipe(
[prompt] , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , return_dict=lowerCAmelCase__ , )[0]
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
SCREAMING_SNAKE_CASE = np.array([0.31_13, 0.39_10, 0.42_72, 0.48_59, 0.50_61, 0.46_52, 0.53_62, 0.57_15, 0.56_61] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ) -> int:
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.dummy_cond_unet_upscale
SCREAMING_SNAKE_CASE = DDPMScheduler()
SCREAMING_SNAKE_CASE = DDIMScheduler(prediction_type='v_prediction' )
SCREAMING_SNAKE_CASE = self.dummy_vae
SCREAMING_SNAKE_CASE = self.dummy_text_encoder
SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE = Image.fromarray(np.uint8(lowerCAmelCase__ ) ).convert('RGB' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE = StableDiffusionUpscalePipeline(
unet=lowerCAmelCase__ , low_res_scheduler=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , max_noise_level=350 , )
SCREAMING_SNAKE_CASE = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = 'A painting of a squirrel eating a burger'
SCREAMING_SNAKE_CASE = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
SCREAMING_SNAKE_CASE = output.images
assert image.shape[0] == 2
SCREAMING_SNAKE_CASE = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
SCREAMING_SNAKE_CASE = sd_pipe(
[prompt] , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
SCREAMING_SNAKE_CASE = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def __A ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE = self.dummy_cond_unet_upscale
SCREAMING_SNAKE_CASE = DDPMScheduler()
SCREAMING_SNAKE_CASE = DDIMScheduler(prediction_type='v_prediction' )
SCREAMING_SNAKE_CASE = self.dummy_vae
SCREAMING_SNAKE_CASE = self.dummy_text_encoder
SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE = Image.fromarray(np.uint8(lowerCAmelCase__ ) ).convert('RGB' ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
SCREAMING_SNAKE_CASE = unet.half()
SCREAMING_SNAKE_CASE = text_encoder.half()
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE = StableDiffusionUpscalePipeline(
unet=lowerCAmelCase__ , low_res_scheduler=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , max_noise_level=350 , )
SCREAMING_SNAKE_CASE = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = 'A painting of a squirrel eating a burger'
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = sd_pipe(
[prompt] , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type='np' , ).images
SCREAMING_SNAKE_CASE = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __A ( self ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ) -> Tuple:
SCREAMING_SNAKE_CASE = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
SCREAMING_SNAKE_CASE = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
'/upsampled_cat.npy' )
SCREAMING_SNAKE_CASE = 'stabilityai/stable-diffusion-x4-upscaler'
SCREAMING_SNAKE_CASE = StableDiffusionUpscalePipeline.from_pretrained(lowerCAmelCase__ )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE = 'a cat sitting on a park bench'
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , output_type='np' , )
SCREAMING_SNAKE_CASE = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1e-3
def __A ( self ) -> Tuple:
SCREAMING_SNAKE_CASE = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
SCREAMING_SNAKE_CASE = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
'/upsampled_cat_fp16.npy' )
SCREAMING_SNAKE_CASE = 'stabilityai/stable-diffusion-x4-upscaler'
SCREAMING_SNAKE_CASE = StableDiffusionUpscalePipeline.from_pretrained(
lowerCAmelCase__ , torch_dtype=torch.float16 , )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE = 'a cat sitting on a park bench'
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , output_type='np' , )
SCREAMING_SNAKE_CASE = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def __A ( self ) -> List[Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
SCREAMING_SNAKE_CASE = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
SCREAMING_SNAKE_CASE = 'stabilityai/stable-diffusion-x4-upscaler'
SCREAMING_SNAKE_CASE = StableDiffusionUpscalePipeline.from_pretrained(
lowerCAmelCase__ , torch_dtype=torch.float16 , )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE = 'a cat sitting on a park bench'
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=5 , output_type='np' , )
SCREAMING_SNAKE_CASE = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
| 38
|
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
# Load configuration defined in the metadata file
with open(SCREAMING_SNAKE_CASE_ ) as metadata_file:
SCREAMING_SNAKE_CASE = json.load(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = LukeConfig(use_entity_aware_attention=SCREAMING_SNAKE_CASE_ , **metadata['model_config'] )
# Load in the weights from the checkpoint_path
SCREAMING_SNAKE_CASE = torch.load(SCREAMING_SNAKE_CASE_ , map_location='cpu' )['module']
# Load the entity vocab file
SCREAMING_SNAKE_CASE = load_original_entity_vocab(SCREAMING_SNAKE_CASE_ )
# add an entry for [MASK2]
SCREAMING_SNAKE_CASE = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
SCREAMING_SNAKE_CASE = XLMRobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] )
# Add special tokens to the token vocabulary for downstream tasks
SCREAMING_SNAKE_CASE = AddedToken('<ent>' , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = AddedToken('<ent2>' , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ )
tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'Saving tokenizer to {pytorch_dump_folder_path}' )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ )
with open(os.path.join(SCREAMING_SNAKE_CASE_ , 'tokenizer_config.json' ) , 'r' ) as f:
SCREAMING_SNAKE_CASE = json.load(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = 'MLukeTokenizer'
with open(os.path.join(SCREAMING_SNAKE_CASE_ , 'tokenizer_config.json' ) , 'w' ) as f:
json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
with open(os.path.join(SCREAMING_SNAKE_CASE_ , MLukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f:
json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = MLukeTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
# Initialize the embeddings of the special tokens
SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(['@'] )[0]
SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(['#'] )[0]
SCREAMING_SNAKE_CASE = state_dict['embeddings.word_embeddings.weight']
SCREAMING_SNAKE_CASE = word_emb[ent_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE = word_emb[enta_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
SCREAMING_SNAKE_CASE = state_dict[bias_name]
SCREAMING_SNAKE_CASE = decoder_bias[ent_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE = decoder_bias[enta_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
SCREAMING_SNAKE_CASE = F'encoder.layer.{layer_index}.attention.self.'
SCREAMING_SNAKE_CASE = state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE = state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
SCREAMING_SNAKE_CASE = state_dict['entity_embeddings.entity_embeddings.weight']
SCREAMING_SNAKE_CASE = entity_emb[entity_vocab['[MASK]']].unsqueeze(0 )
SCREAMING_SNAKE_CASE = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
SCREAMING_SNAKE_CASE = state_dict['entity_predictions.bias']
SCREAMING_SNAKE_CASE = entity_prediction_bias[entity_vocab['[MASK]']].unsqueeze(0 )
SCREAMING_SNAKE_CASE = torch.cat([entity_prediction_bias, entity_mask_bias] )
SCREAMING_SNAKE_CASE = LukeForMaskedLM(config=SCREAMING_SNAKE_CASE_ ).eval()
state_dict.pop('entity_predictions.decoder.weight' )
state_dict.pop('lm_head.decoder.weight' )
state_dict.pop('lm_head.decoder.bias' )
SCREAMING_SNAKE_CASE = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('lm_head' ) or key.startswith('entity_predictions' )):
SCREAMING_SNAKE_CASE = state_dict[key]
else:
SCREAMING_SNAKE_CASE = state_dict[key]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ )
if set(SCREAMING_SNAKE_CASE_ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F'Unexpected unexpected_keys: {unexpected_keys}' )
if set(SCREAMING_SNAKE_CASE_ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F'Unexpected missing_keys: {missing_keys}' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
SCREAMING_SNAKE_CASE = MLukeTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ , task='entity_classification' )
SCREAMING_SNAKE_CASE = 'ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'
SCREAMING_SNAKE_CASE = (0, 9)
SCREAMING_SNAKE_CASE = tokenizer(SCREAMING_SNAKE_CASE_ , entity_spans=[span] , return_tensors='pt' )
SCREAMING_SNAKE_CASE = model(**SCREAMING_SNAKE_CASE_ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
SCREAMING_SNAKE_CASE = torch.Size((1, 33, 7_68) )
SCREAMING_SNAKE_CASE = torch.tensor([[0.08_92, 0.05_96, -0.28_19], [0.01_34, 0.11_99, 0.05_73], [-0.01_69, 0.09_27, 0.06_44]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
SCREAMING_SNAKE_CASE = torch.Size((1, 1, 7_68) )
SCREAMING_SNAKE_CASE = torch.tensor([[-0.14_82, 0.06_09, 0.03_22]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
F' {expected_shape}' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path )
text = 'Tokyo is the capital of <mask>.'
span = (24, 30)
encoding = tokenizer(text , entity_spans=[span] , return_tensors='pt' )
outputs = model(**encoding )
input_ids = encoding['input_ids'][0].tolist()
mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids('<mask>' ) )
predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(predicted_id )
predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
multilingual_predicted_entities = [
    entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('en:' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('Saving PyTorch model to {}'.format(pytorch_dump_folder_path ) )
model.save_pretrained(pytorch_dump_folder_path )
def lowercase (entity_vocab_path: str ) -> dict:
    SPECIAL_TOKENS = ['[MASK]', '[PAD]', '[UNK]']
    data = [json.loads(line ) for line in open(entity_vocab_path )]
    new_mapping = {}
    for entry in data:
        entity_id = entry['id']
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_entity_name = F'{language}:{entity_name}'
            new_mapping[new_entity_name] = entity_id
return new_mapping
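# Illustrative sketch (not part of the original script): the helper above expects the
# original entity vocab as JSON lines, one entry per entity id. Assuming a hypothetical
# line such as
#
#   {"id": 3, "entities": [["Japan", "en"], ["日本", "ja"]]}
#
# it would contribute {"en:Japan": 3, "ja:日本": 3} to the returned mapping, while
# special tokens like "[MASK]" keep their bare name as the key.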
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
__UpperCamelCase = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
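# Example invocation (a sketch; the paths are placeholders, and the filename assumes
# the usual convert_mluke_original_pytorch_checkpoint_to_pytorch.py naming):
#
#   python convert_mluke_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./mluke/pytorch_model.bin \
#       --metadata_path ./mluke/metadata.json \
#       --entity_vocab_path ./mluke/entity_vocab.jsonl \
#       --pytorch_dump_folder_path ./mluke-base-converted \
#       --model_size base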
| 38
| 1
|
'''simple docstring'''
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler (SchedulerMixin , ConfigMixin ):
    """simple docstring"""
    order = 1

    @register_to_config
    def __init__( self , num_train_timesteps=2000 , beta_min=0.1 , beta_max=20 , sampling_eps=1e-3 ):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps( self , num_inference_steps , device=None ):
        self.timesteps = torch.linspace(1 , self.config.sampling_eps , num_inference_steps , device=device )
    def step_pred( self , score , x , t , generator=None ):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
        std = std.flatten()
        while len(std.shape ) < len(score.shape ):
            std = std.unsqueeze(-1 )
        score = -score / std
        # compute
        dt = -1.0 / len(self.timesteps )
        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape ) < len(x.shape ):
            beta_t = beta_t.unsqueeze(-1 )
        drift = -0.5 * beta_t * x
        diffusion = torch.sqrt(beta_t )
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt
        # add noise
        noise = randn_tensor(x.shape , layout=x.layout , generator=generator , device=x.device , dtype=x.dtype )
        x = x_mean + diffusion * math.sqrt(-dt ) * noise
        return x, x_mean

    def __len__( self ):
        return self.config.num_train_timesteps
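# Minimal usage sketch (an illustration, not part of the original file; `score_model`
# is a hypothetical score network):
#
#   scheduler = ScoreSdeVpScheduler()
#   scheduler.set_timesteps(num_inference_steps=1000)
#   x = torch.randn(1, 3, 32, 32)
#   for t in scheduler.timesteps:
#       score = score_model(x, t)
#       x, x_mean = scheduler.step_pred(score, x, t)
#   # x_mean is the denoised estimate without the final injected noise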
| 63
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_lilt"] = [
        "LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LiltForQuestionAnswering",
        "LiltForSequenceClassification",
        "LiltForTokenClassification",
        "LiltModel",
        "LiltPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_lilt import (
            LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
            LiltForQuestionAnswering,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltModel,
            LiltPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
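# Note (a sketch of the runtime behavior): outside of type checking, the module object
# is swapped for a _LazyModule, so importing this package stays cheap and the torch-heavy
# modeling_lilt module is only imported on first attribute access, e.g.:
#
#   from transformers.models import lilt   # no torch import yet
#   lilt.LiltModel                          # triggers the real import of modeling_lilt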
| 177
| 0
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester :
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=40 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) , 1)
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_pegasus_inputs_dict(config , input_ids , decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self , config , inputs_dict ):
        model = TFPegasusModel(config=config ).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True )
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1 )
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )[0]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice , output_from_no_past_slice , rtol=1E-3 )
def prepare_pegasus_inputs_dict(
    config ,
    input_ids ,
    decoder_input_ids ,
    attention_mask=None ,
    decoder_attention_mask=None ,
    head_mask=None ,
    decoder_head_mask=None ,
    cross_attn_head_mask=None ,
):
    """simple docstring"""
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
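# Note (sketch): any mask not passed to prepare_pegasus_inputs_dict is synthesized with
# a sensible default, e.g.:
#
#   batch = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
#   # attention masks mark non-pad positions, the decoder mask always attends to the
#   # start token, and head masks are all-ones of shape (num_layers, num_heads)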
@require_tf
class TFPegasusModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self ):
        self.model_tester = TFPegasusModelTester(self )
        self.config_tester = ConfigTester(self , config_class=PegasusConfig )

    def test_config(self ):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests ( unittest.TestCase ):
    src_text = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
    expected_text = [
"California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"
" reduce the risk of wildfires.",
"N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
SCREAMING_SNAKE_CASE = "google/pegasus-xsum"
    @cached_property
    def tokenizer(self ):
        return AutoTokenizer.from_pretrained(self.model_name )

    @cached_property
    def model(self ):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name )
        return model

    def _assert_generated_batch_equal_expected(self , **tokenizer_kwargs ):
        generated_words = self.translate_src_text(**tokenizer_kwargs )
        assert self.expected_text == generated_words

    def translate_src_text(self , **tokenizer_kwargs ):
        model_inputs = self.tokenizer(self.src_text , **tokenizer_kwargs , padding=True , return_tensors="tf" )
        generated_ids = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=True , )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=True )
        return generated_words

    @slow
    def test_batch_generation(self ):
        self._assert_generated_batch_equal_expected()
| 57
|
"""simple docstring"""
from __future__ import annotations
class BoyerMooreSearch :
    def __init__(self , text: str , pattern: str ):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text ), len(pattern )

    def match_in_pattern(self , char: str ):
        for i in range(self.patLen - 1 , -1 , -1 ):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self , current_pos: int ):
        for i in range(self.patLen - 1 , -1 , -1 ):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self ):
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1 ):
            mismatch_index = self.mismatch_in_text(i )
            if mismatch_index == -1:
                positions.append(i )
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index] )
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
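# Worked example (sketch): with text = "ABAABA" and pattern = "AB", the alignments at
# i = 0 and i = 3 match completely, so bad_character_heuristic() returns [0, 3]. On a
# mismatch, the bad-character rule looks up the offending text character in the pattern
# (match_in_pattern) and shifts so its rightmost pattern occurrence lines up with it.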
__A : int = 'ABAABA'
__A : Optional[Any] = 'AB'
__A : Any = BoyerMooreSearch(text, pattern)
__A : Any = bms.bad_character_heuristic()
if len(positions) == 0:
print('No match found')
else:
print('Pattern found in following positions: ')
print(positions)
| 57
| 1
|
"""simple docstring"""
def solution(n: int = 1000 ) -> int:
    f1, f2 = 1, 1
    index = 2
    while True:
        i = 0
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        for _ in str(f ):
            i += 1
        if i == n:
            break
    return index
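# Quick check (sketch): the sequence runs 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, ...
# and 144, the 12th term, is the first with three digits, so solution(3) == 12.
# Project Euler problem 25 asks for solution(1000).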
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 86
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""microsoft/markuplm-base""": """https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json""",
"""microsoft/markuplm-large""": """https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json""",
}
class MarkupLMConfig ( PretrainedConfig ):
    model_type = 'markuplm'

    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , bos_token_id=0 , eos_token_id=2 , max_xpath_tag_unit_embeddings=256 , max_xpath_subs_unit_embeddings=1024 , tag_pad_id=216 , subs_pad_id=1001 , xpath_unit_hidden_size=32 , max_depth=50 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
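# Construction sketch (an assumption: the defaults above mirror microsoft/markuplm-base):
#
#   config = MarkupLMConfig()  # base-sized encoder plus xpath embeddings
#   # the xpath-specific fields (max_depth, tag_pad_id, subs_pad_id, ...) size the extra
#   # tag/subscript embeddings MarkupLM adds on top of a BERT-style encoder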
| 86
| 1
|
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class a__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
__lowerCamelCase = tempfile.mkdtemp()
__lowerCamelCase = SamImageProcessor()
__lowerCamelCase = SamProcessor(a )
processor.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , **a : int ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **a ).image_processor
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
__lowerCamelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uint8 )]
__lowerCamelCase = [Image.fromarray(np.moveaxis(a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
__lowerCamelCase = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowerCamelCase = self.get_image_processor(do_normalize=a , padding_value=1.0 )
__lowerCamelCase = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=a , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , a )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
__lowerCamelCase = self.get_image_processor()
__lowerCamelCase = SamProcessor(image_processor=a )
__lowerCamelCase = self.prepare_image_inputs()
__lowerCamelCase = image_processor(a , return_tensors='''np''' )
__lowerCamelCase = processor(images=a , return_tensors='''np''' )
input_feat_extract.pop('''original_sizes''' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('''reshaped_input_sizes''' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_torch
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
__lowerCamelCase = self.get_image_processor()
__lowerCamelCase = SamProcessor(image_processor=a )
__lowerCamelCase = [torch.ones((1, 3, 5, 5) )]
__lowerCamelCase = [[17_64, 26_46]]
__lowerCamelCase = [[6_83, 10_24]]
__lowerCamelCase = processor.post_process_masks(a , a , a )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
__lowerCamelCase = processor.post_process_masks(
a , torch.tensor(a ) , torch.tensor(a ) )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
# should also work with np
__lowerCamelCase = [np.ones((1, 3, 5, 5) )]
__lowerCamelCase = processor.post_process_masks(a , np.array(a ) , np.array(a ) )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
__lowerCamelCase = [[1, 0], [0, 1]]
with self.assertRaises(a ):
__lowerCamelCase = processor.post_process_masks(a , np.array(a ) , np.array(a ) )
@require_vision
@require_tf
class a__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
__lowerCamelCase = tempfile.mkdtemp()
__lowerCamelCase = SamImageProcessor()
__lowerCamelCase = SamProcessor(a )
processor.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , **a : str ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **a ).image_processor
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
__lowerCamelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uint8 )]
__lowerCamelCase = [Image.fromarray(np.moveaxis(a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
__lowerCamelCase = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowerCamelCase = self.get_image_processor(do_normalize=a , padding_value=1.0 )
__lowerCamelCase = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=a , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , a )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
__lowerCamelCase = self.get_image_processor()
__lowerCamelCase = SamProcessor(image_processor=a )
__lowerCamelCase = self.prepare_image_inputs()
__lowerCamelCase = image_processor(a , return_tensors='''np''' )
__lowerCamelCase = processor(images=a , return_tensors='''np''' )
input_feat_extract.pop('''original_sizes''' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('''reshaped_input_sizes''' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_tf
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase = self.get_image_processor()
__lowerCamelCase = SamProcessor(image_processor=a )
__lowerCamelCase = [tf.ones((1, 3, 5, 5) )]
__lowerCamelCase = [[17_64, 26_46]]
__lowerCamelCase = [[6_83, 10_24]]
__lowerCamelCase = processor.post_process_masks(a , a , a , return_tensors='''tf''' )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
__lowerCamelCase = processor.post_process_masks(
a , tf.convert_to_tensor(a ) , tf.convert_to_tensor(a ) , return_tensors='''tf''' , )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
# should also work with np
__lowerCamelCase = [np.ones((1, 3, 5, 5) )]
__lowerCamelCase = processor.post_process_masks(
a , np.array(a ) , np.array(a ) , return_tensors='''tf''' )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
__lowerCamelCase = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
__lowerCamelCase = processor.post_process_masks(
a , np.array(a ) , np.array(a ) , return_tensors='''tf''' )
@require_vision
@require_torchvision
class a__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
__lowerCamelCase = tempfile.mkdtemp()
__lowerCamelCase = SamImageProcessor()
__lowerCamelCase = SamProcessor(a )
processor.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self : str , **a : Any ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **a ).image_processor
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
__lowerCamelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uint8 )]
__lowerCamelCase = [Image.fromarray(np.moveaxis(a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
__lowerCamelCase = self.get_image_processor()
__lowerCamelCase = SamProcessor(image_processor=a )
__lowerCamelCase = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.float32 )
__lowerCamelCase = [tf.convert_to_tensor(a )]
__lowerCamelCase = [torch.tensor(a )]
__lowerCamelCase = [[17_64, 26_46]]
__lowerCamelCase = [[6_83, 10_24]]
__lowerCamelCase = processor.post_process_masks(
a , a , a , return_tensors='''tf''' )
__lowerCamelCase = processor.post_process_masks(
a , a , a , return_tensors='''pt''' )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
__lowerCamelCase = self.get_image_processor()
__lowerCamelCase = SamProcessor(image_processor=a )
__lowerCamelCase = self.prepare_image_inputs()
__lowerCamelCase = image_processor(a , return_tensors='''pt''' )['''pixel_values'''].numpy()
__lowerCamelCase = processor(images=a , return_tensors='''pt''' )['''pixel_values'''].numpy()
__lowerCamelCase = image_processor(a , return_tensors='''tf''' )['''pixel_values'''].numpy()
__lowerCamelCase = processor(images=a , return_tensors='''tf''' )['''pixel_values'''].numpy()
self.assertTrue(np.allclose(a , a ) )
self.assertTrue(np.allclose(a , a ) )
self.assertTrue(np.allclose(a , a ) )
| 237
|
'''simple docstring'''
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel ( ModelMixin , ConfigMixin ):
    @register_to_config
    def __init__( self , * ,
        clip_extra_context_tokens: int = 4 , clip_embeddings_dim: int = 768 , time_embed_dim: int , cross_attention_dim , ):
        super().__init__()
        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim ) )
        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim , time_embed_dim )
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim , time_embed_dim )
        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim , self.clip_extra_context_tokens * cross_attention_dim )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim , cross_attention_dim )
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim )

    def forward( self , * , image_embeddings , prompt_embeds , text_encoder_hidden_states , do_classifier_free_guidance ):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size , -1 )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]
        batch_size = prompt_embeds.shape[0]
        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds )
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings )
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds
        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings )
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size , -1 , self.clip_extra_context_tokens )
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0 , 2 , 1 )
        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states )
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states )
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
        return text_encoder_hidden_states, additive_clip_time_embeddings
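# Shape sketch (illustrative numbers, not from the original file):
#
#   proj = UnCLIPTextProjModel(clip_extra_context_tokens=4, clip_embeddings_dim=768,
#                              time_embed_dim=1536, cross_attention_dim=1280)
#   hidden, time_emb = proj(image_embeddings=torch.randn(2, 768),
#                           prompt_embeds=torch.randn(2, 768),
#                           text_encoder_hidden_states=torch.randn(2, 77, 768),
#                           do_classifier_free_guidance=False)
#   # hidden: (2, 77 + 4, 1280) -- four extra context tokens; time_emb: (2, 1536)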
| 237
| 1
|
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester :
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=128 , max_relative_position=32 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        return NezhaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def prepare_config_and_inputs_for_decoder( self ):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = NezhaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_model_as_decoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        config.add_cross_attention = True
        model = NezhaModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , )
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = NezhaForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_next_sequence_prediction( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = NezhaForNextSentencePrediction(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
    def create_and_check_for_pretraining( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = NezhaForPreTraining(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , next_sentence_label=sequence_labels , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = NezhaForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class NezhaModelTest ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": NezhaModel,
"fill-mask": NezhaForMaskedLM,
"question-answering": NezhaForQuestionAnswering,
"text-classification": NezhaForSequenceClassification,
"token-classification": NezhaForTokenClassification,
"zero-shot": NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp( self ):
        self.model_tester = NezhaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=NezhaConfig , hidden_size=37 )
    def test_config( self ):
self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_as_decoder( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs )
    def test_model_as_decoder_with_default_input_mask( self ):
# This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , )
    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )

    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )

    def test_for_next_sequence_prediction( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs )

    def test_for_pretraining( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )

    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )

    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )

    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    @slow
    @require_torch_gpu
    def test_torchscript_device_change( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config )
            inputs_dict = self._prepare_for_class(inputs_dict , model_class )
            traced_model = torch.jit.trace(
                model , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model , os.path.join(tmp , "bert.pt" ) )
                loaded = torch.jit.load(os.path.join(tmp , "bert.pt" ) , map_location=torch_device )
                loaded(inputs_dict["input_ids"].to(torch_device ) , inputs_dict["attention_mask"].to(torch_device ) )
@require_torch
class NezhaModelIntegrationTest ( unittest.TestCase ):
    @slow
    def test_inference_nezha_model( self ):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base" )
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 6, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) )
    @slow
    def test_inference_nezha_masked_lm( self ):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base" )
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 6, 21128) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) )
| 135
|
"""simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir('''fixtures''')
class _snake_case ( unittest.TestCase ):
def lowerCamelCase__ ( self : List[Any] ):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit" )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request" , return_value=response_mock ) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit" )
# This check we did call the fake head request
mock_head.assert_called()
def lowerCamelCase__ ( self : Dict ):
# This test is for deprecated behavior and can be removed in v5
__lowerCamelCase : List[str] = ViTImageProcessor.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json" )
def lowerCamelCase__ ( self : str ):
with self.assertRaises(UpperCAmelCase ):
# config is in subfolder, the following should not work without specifying the subfolder
__lowerCamelCase : Dict = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants" )
__lowerCamelCase : Any = AutoImageProcessor.from_pretrained(
"hf-internal-testing/stable-diffusion-all-variants" , subfolder="feature_extractor" )
self.assertIsNotNone(UpperCAmelCase )
@is_staging_test
class _snake_case ( unittest.TestCase ):
@classmethod
def lowerCamelCase__ ( cls : List[str] ):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
@classmethod
def lowerCamelCase__ ( cls : Union[str, Any] ):
try:
delete_repo(token=cls._token , repo_id="test-image-processor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-image-processor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-image-processor" )
except HTTPError:
pass
def lowerCamelCase__ ( self : str ):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR )
        image_processor.push_to_hub("test-image-processor" , use_auth_token=self._token )
        new_image_processor = ViTImageProcessor.from_pretrained(F"""{USER}/test-image-processor""" )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v , getattr(new_image_processor , k ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-image-processor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
UpperCAmelCase , repo_id="test-image-processor" , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
        new_image_processor = ViTImageProcessor.from_pretrained(F"""{USER}/test-image-processor""" )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v , getattr(new_image_processor , k ) )
def lowerCamelCase__ ( self : List[Any] ):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR )
        image_processor.push_to_hub("valid_org/test-image-processor" , use_auth_token=self._token )
        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor" )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v , getattr(new_image_processor , k ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-image-processor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
UpperCAmelCase , repo_id="valid_org/test-image-processor-org" , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
__lowerCamelCase : Any = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org" )
for k, v in image_processor.__dict__.items():
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
def lowerCamelCase__ ( self : Union[str, Any] ):
CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR )
        image_processor.push_to_hub("test-dynamic-image-processor" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"} , )
        new_image_processor = AutoImageProcessor.from_pretrained(
            F"""{USER}/test-dynamic-image-processor""" , trust_remote_code=True )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , "CustomImageProcessor" )
| 135
| 1
|
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''')
class _snake_case ( _snake_case , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = BartphoTokenizer
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = True
def SCREAMING_SNAKE_CASE__ ( self ):
super().setUp()
a :Dict = ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']
a :Optional[Any] = dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) )
a :Tuple = {'''unk_token''': '''<unk>'''}
a :Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''monolingual_vocab_file'''] )
with open(self.monolingual_vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
for token in vocab_tokens:
fp.write(F'''{token} {vocab_tokens[token]}\n''' )
a :Any = BartphoTokenizer(_lowerCamelCase , self.monolingual_vocab_file , **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self , **_lowerCamelCase ):
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
a :int = '''This is a là test'''
a :str = '''This is a<unk><unk> test'''
return input_text, output_text
def SCREAMING_SNAKE_CASE__ ( self ):
a :List[str] = BartphoTokenizer(_lowerCamelCase , self.monolingual_vocab_file , **self.special_tokens_map )
a :Optional[Any] = '''This is a là test'''
a :Tuple = '''▁This ▁is ▁a ▁l à ▁t est'''.split()
a :int = tokenizer.tokenize(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
a :Union[str, Any] = tokens + [tokenizer.unk_token]
a :str = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , _lowerCamelCase )
| 281
|
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Create a beta schedule that discretizes the given alpha-bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
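# A quick illustration of the helper above (a sketch): the returned betas make
# the cumulative product of (1 - beta) follow the chosen alpha-bar curve, as in
# https://arxiv.org/abs/2102.09672.
#
#     betas = betas_for_alpha_bar(1000)               # tensor of shape (1000,)
#     alphas_cumprod = torch.cumprod(1.0 - betas, 0)  # decays from ~1 toward 0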
class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()

            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance
    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ) -> Union[UnCLIPSchedulerOutput, Tuple]:
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev**0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha**0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )

            variance = self._get_variance(
                t, predicted_variance=predicted_variance, prev_timestep=prev_timestep
            )

            if self.variance_type == "fixed_small_log":
                variance = variance  # already a standard deviation (see _get_variance)
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or"
                    " `learned_range` for the UnCLIPScheduler."
                )

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ) -> torch.FloatTensor:
        # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
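# A minimal sketch of the denoising loop this scheduler supports; the random
# tensor stands in for a real UNet's output, and prev_timestep is left at its
# default (the real UnCLIP pipeline passes it explicitly).
if __name__ == "__main__":
    scheduler = UnCLIPScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(25)
    sample = torch.randn(1, 3, 64, 64)
    for t in scheduler.timesteps:
        model_output = torch.randn_like(sample)  # stand-in for a model forward pass
        sample = scheduler.step(model_output, t, sample).prev_sample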
| 281
| 1
|
def perfect_cube(n: int) -> bool:
    """Return True if a non-negative integer `n` is a perfect cube."""
    # Round the floating-point cube root to the nearest integer before cubing;
    # comparing raw float results fails (e.g. 27 ** (1 / 3) == 3.0000000000000004).
    val = round(n ** (1 / 3))
    return (val * val * val) == n
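# Floating point can still misround the cube root for very large integers; a
# pure-integer bisection (a sketch added for illustration) avoids that entirely:
def perfect_cube_binary_search(n: int) -> bool:
    left, right = 0, n
    while left <= right:
        mid = (left + right) // 2
        if mid**3 == n:
            return True
        if mid**3 < n:
            left = mid + 1
        else:
            right = mid - 1
    return False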
if __name__ == "__main__":
    print(perfect_cube(27))  # True
    print(perfect_cube(4))  # False
| 38
|
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends
if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup
UpperCAmelCase_ : Any = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( _a ):
def __init__( self : Optional[int] , **__lowerCamelCase : Optional[int] ):
requires_backends(self , ["""bs4"""] )
super().__init__(**__lowerCamelCase )
def _A ( self : List[str] , __lowerCamelCase : Any ):
UpperCamelCase :Optional[int] = []
UpperCamelCase :List[str] = []
UpperCamelCase :Union[str, Any] = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
UpperCamelCase :Optional[Any] = parent.find_all(child.name , recursive=__lowerCamelCase )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(__lowerCamelCase ) else next(i for i, s in enumerate(__lowerCamelCase , 1 ) if s is child ) )
UpperCamelCase :Any = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def _A ( self : Any , __lowerCamelCase : Tuple ):
UpperCamelCase :Any = BeautifulSoup(__lowerCamelCase , """html.parser""" )
UpperCamelCase :Union[str, Any] = []
UpperCamelCase :Tuple = []
UpperCamelCase :Tuple = []
for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
continue
UpperCamelCase :Any = html.unescape(__lowerCamelCase ).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(__lowerCamelCase )
UpperCamelCase , UpperCamelCase :Optional[Any] = self.xpath_soup(__lowerCamelCase )
stringaxtag_seq.append(__lowerCamelCase )
stringaxsubs_seq.append(__lowerCamelCase )
if len(__lowerCamelCase ) != len(__lowerCamelCase ):
raise ValueError("""Number of doc strings and xtags does not correspond""" )
if len(__lowerCamelCase ) != len(__lowerCamelCase ):
raise ValueError("""Number of doc strings and xsubs does not correspond""" )
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def _A ( self : int , __lowerCamelCase : List[Any] , __lowerCamelCase : List[str] ):
UpperCamelCase :Tuple = """"""
for tagname, subs in zip(__lowerCamelCase , __lowerCamelCase ):
xpath += F"""/{tagname}"""
if subs != 0:
xpath += F"""[{subs}]"""
return xpath
def __call__( self : Any , __lowerCamelCase : Dict ):
UpperCamelCase :Any = False
# Check that strings has a valid type
if isinstance(__lowerCamelCase , __lowerCamelCase ):
UpperCamelCase :List[Any] = True
elif isinstance(__lowerCamelCase , (list, tuple) ):
if len(__lowerCamelCase ) == 0 or isinstance(html_strings[0] , __lowerCamelCase ):
UpperCamelCase :Any = True
        if not valid_strings:
            raise ValueError(
                "HTML strings must be of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )
UpperCamelCase :str = bool(isinstance(__lowerCamelCase , (list, tuple) ) and (isinstance(html_strings[0] , __lowerCamelCase )) )
if not is_batched:
UpperCamelCase :Any = [html_strings]
# Get nodes + xpaths
UpperCamelCase :Union[str, Any] = []
UpperCamelCase :str = []
for html_string in html_strings:
UpperCamelCase , UpperCamelCase , UpperCamelCase :int = self.get_three_from_single(__lowerCamelCase )
nodes.append(__lowerCamelCase )
UpperCamelCase :int = []
for node, tag_list, sub_list in zip(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
UpperCamelCase :str = self.construct_xpath(__lowerCamelCase , __lowerCamelCase )
xpath_strings.append(__lowerCamelCase )
xpaths.append(__lowerCamelCase )
# return as Dict
UpperCamelCase :Optional[int] = {"""nodes""": nodes, """xpaths""": xpaths}
UpperCamelCase :Any = BatchFeature(data=__lowerCamelCase , tensor_type=__lowerCamelCase )
return encoded_inputs
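# A minimal usage sketch, assuming the class above is transformers'
# MarkupLMFeatureExtractor (requires `pip install beautifulsoup4`):
#
#     from transformers import MarkupLMFeatureExtractor
#     feature_extractor = MarkupLMFeatureExtractor()
#     encoding = feature_extractor("<html><body><h1>Title</h1><p>Some text.</p></body></html>")
#     print(encoding["nodes"])   # [['Title', 'Some text.']]
#     print(encoding["xpaths"])  # [['/html/body/h1', '/html/body/p']]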
| 38
| 1
|
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """
    Transform a snake_case string to camelCase (or PascalCase if indicated).
    >>> snake_to_camel_case("some_random_string")
    'someRandomString'
    >>> snake_to_camel_case("some_random_string", use_pascal=True)
    'SomeRandomString'
    """
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)
    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
| 356
|
"""simple docstring"""
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def __lowerCAmelCase ( lowercase : int ) -> Tuple:
"""simple docstring"""
snake_case : Any = fname.split(os.path.sep )[-1]
return re.search(R"^(.*)_\d+\.jpg$" , lowercase ).groups()[0]
class _lowerCAmelCase ( snake_case_ ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Union[str, Any] = file_names
snake_case : Optional[Any] = image_transform
snake_case : Optional[int] = label_to_id
def __len__( self ) -> Tuple:
'''simple docstring'''
return len(self.file_names )
def __getitem__( self , UpperCamelCase__ ) -> int:
'''simple docstring'''
snake_case : str = self.file_names[idx]
snake_case : Any = PIL.Image.open(UpperCamelCase__ )
snake_case : Optional[int] = raw_image.convert("RGB" )
if self.image_transform is not None:
snake_case : Optional[Any] = self.image_transform(UpperCamelCase__ )
snake_case : Optional[Any] = extract_label(UpperCamelCase__ )
if self.label_to_id is not None:
snake_case : Optional[Any] = self.label_to_id[label]
return {"image": image, "label": label}
def __lowerCAmelCase ( lowercase : Any , lowercase : List[Any] ) -> List[str]:
"""simple docstring"""
if args.with_tracking:
snake_case : List[str] = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="all" , project_dir=args.project_dir )
else:
snake_case : Optional[int] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case : str = config["lr"]
snake_case : Union[str, Any] = int(config["num_epochs"] )
snake_case : str = int(config["seed"] )
snake_case : str = int(config["batch_size"] )
snake_case : Any = config["image_size"]
if not isinstance(lowercase , (list, tuple) ):
snake_case : str = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , "isdigit" ):
if args.checkpointing_steps == "epoch":
snake_case : List[str] = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
snake_case : Any = int(args.checkpointing_steps )
else:
raise ValueError(
F'Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.' )
else:
snake_case : List[str] = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
snake_case : Union[str, Any] = os.path.split(lowercase )[-1].split("." )[0]
accelerator.init_trackers(lowercase , lowercase )
# Grab all the image filenames
snake_case : int = [os.path.join(args.data_dir , lowercase ) for fname in os.listdir(args.data_dir ) if fname.endswith(".jpg" )]
# Build the label correspondences
snake_case : Union[str, Any] = [extract_label(lowercase ) for fname in file_names]
snake_case : Any = list(set(lowercase ) )
id_to_label.sort()
snake_case : int = {lbl: i for i, lbl in enumerate(lowercase )}
# Set the seed before splitting the data.
np.random.seed(lowercase )
torch.manual_seed(lowercase )
torch.cuda.manual_seed_all(lowercase )
# Split our filenames between train and validation
snake_case : Optional[Any] = np.random.permutation(len(lowercase ) )
snake_case : int = int(0.8 * len(lowercase ) )
snake_case : int = random_perm[:cut]
snake_case : int = random_perm[cut:]
# For training we use a simple RandomResizedCrop
snake_case : List[Any] = Compose([RandomResizedCrop(lowercase , scale=(0.5, 1.0) ), ToTensor()] )
snake_case : List[str] = PetsDataset(
[file_names[i] for i in train_split] , image_transform=lowercase , label_to_id=lowercase )
# For evaluation, we use a deterministic Resize
snake_case : Optional[Any] = Compose([Resize(lowercase ), ToTensor()] )
snake_case : Optional[Any] = PetsDataset([file_names[i] for i in eval_split] , image_transform=lowercase , label_to_id=lowercase )
# Instantiate dataloaders.
snake_case : Optional[Any] = DataLoader(lowercase , shuffle=lowercase , batch_size=lowercase , num_workers=4 )
snake_case : Tuple = DataLoader(lowercase , shuffle=lowercase , batch_size=lowercase , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
snake_case : Optional[int] = create_model("resnet50d" , pretrained=lowercase , num_classes=len(lowercase ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
snake_case : Any = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
snake_case : Dict = False
for param in model.get_classifier().parameters():
snake_case : List[Any] = True
# We normalize the batches of images to be a bit faster.
snake_case : Dict = torch.tensor(model.default_cfg["mean"] )[None, :, None, None].to(accelerator.device )
snake_case : Union[str, Any] = torch.tensor(model.default_cfg["std"] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
snake_case : int = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
snake_case : Dict = OneCycleLR(optimizer=lowercase , max_lr=lowercase , epochs=lowercase , steps_per_epoch=len(lowercase ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case ,snake_case ,snake_case ,snake_case ,snake_case : List[str] = accelerator.prepare(
lowercase , lowercase , lowercase , lowercase , lowercase )
# We need to keep track of how many total steps we have iterated over
snake_case : List[Any] = 0
# We also need to keep track of the starting epoch so files are named properly
snake_case : Optional[int] = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F'Resumed from checkpoint: {args.resume_from_checkpoint}' )
accelerator.load_state(args.resume_from_checkpoint )
snake_case : List[str] = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
snake_case : List[Any] = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
snake_case : int = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
snake_case : Union[str, Any] = os.path.splitext(lowercase )[0]
if "epoch" in training_difference:
snake_case : Any = int(training_difference.replace("epoch_" , "" ) ) + 1
snake_case : int = None
else:
snake_case : Any = int(training_difference.replace("step_" , "" ) )
snake_case : Optional[int] = resume_step // len(lowercase )
resume_step -= starting_epoch * len(lowercase )
# Now we train the model
for epoch in range(lowercase , lowercase ):
model.train()
if args.with_tracking:
snake_case : Union[str, Any] = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
snake_case : List[str] = accelerator.skip_first_batches(lowercase , lowercase )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
snake_case : Any = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
snake_case : List[Any] = {k: v.to(accelerator.device ) for k, v in batch.items()}
snake_case : Optional[int] = (batch["image"] - mean) / std
snake_case : str = model(lowercase )
snake_case : Dict = torch.nn.functional.cross_entropy(lowercase , batch["label"] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(lowercase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(lowercase , lowercase ):
snake_case : Any = F'step_{overall_step}'
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
snake_case : List[str] = os.path.join(args.output_dir , lowercase )
accelerator.save_state(lowercase )
model.eval()
snake_case : List[str] = 0
snake_case : List[str] = 0
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
snake_case : int = {k: v.to(accelerator.device ) for k, v in batch.items()}
snake_case : Tuple = (batch["image"] - mean) / std
with torch.no_grad():
snake_case : Optional[int] = model(lowercase )
snake_case : List[Any] = outputs.argmax(dim=-1 )
snake_case ,snake_case : int = accelerator.gather_for_metrics((predictions, batch["label"]) )
snake_case : Union[str, Any] = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
snake_case : List[Any] = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F'epoch {epoch}: {100 * eval_metric:.2f}' )
if args.with_tracking:
accelerator.log(
{
"accuracy": 100 * eval_metric,
"train_loss": total_loss.item() / len(lowercase ),
"epoch": epoch,
} , step=lowercase , )
if checkpointing_steps == "epoch":
snake_case : Optional[Any] = F'epoch_{epoch}'
if args.output_dir is not None:
snake_case : Union[str, Any] = os.path.join(args.output_dir , lowercase )
accelerator.save_state(lowercase )
if args.with_tracking:
accelerator.end_training()
def __lowerCAmelCase ( ) -> str:
"""simple docstring"""
snake_case : Optional[Any] = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument("--data_dir" , required=lowercase , help="The data folder on disk." )
parser.add_argument("--fp16" , action="store_true" , help="If passed, will use FP16 training." )
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). "
        "Bf16 requires PyTorch >= 1.10 and an Nvidia Ampere GPU.",
    )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
parser.add_argument(
"--checkpointing_steps" , type=lowercase , default=lowercase , help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch." , )
parser.add_argument(
"--output_dir" , type=lowercase , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--resume_from_checkpoint" , type=lowercase , default=lowercase , help="If the training should continue from a checkpoint folder." , )
parser.add_argument(
"--with_tracking" , action="store_true" , help="Whether to load in all available experiment trackers from the environment and use them for logging." , )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
snake_case : Optional[Any] = parser.parse_args()
snake_case : List[str] = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
training_function(lowercase , lowercase )
if __name__ == "__main__":
main()
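# Typical launches for this script (assuming it is saved as cv_example.py and the
# images follow the `{label}_{id}.jpg` naming used by the Oxford-IIIT Pet dataset):
#
#     python cv_example.py --data_dir path/to/images
#     accelerate launch cv_example.py --data_dir path/to/images --mixed_precision fp16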
| 112
| 0
|
"""simple docstring"""
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class _UpperCamelCase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : List[Any] ="""pixel_values"""
__UpperCAmelCase : Union[str, Any] =False
__UpperCAmelCase : Union[str, Any] =TimmBackboneConfig
def __init__( self , __a , **__a ):
requires_backends(self , "timm" )
super().__init__(__a )
__lowerCAmelCase = config
if config.backbone is None:
raise ValueError("backbone is not set in the config. Please set it to a timm model name." )
if config.backbone not in timm.list_models():
raise ValueError(f"backbone {config.backbone} is not supported by timm." )
if hasattr(__a , "out_features" ) and config.out_features is not None:
raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead." )
__lowerCAmelCase = getattr(__a , "use_pretrained_backbone" , __a )
if pretrained is None:
raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False." )
# We just take the final layer by default. This matches the default for the transformers models.
__lowerCAmelCase = config.out_indices if getattr(__a , "out_indices" , __a ) is not None else (-1,)
__lowerCAmelCase = timm.create_model(
config.backbone , pretrained=__a , features_only=config.features_only , in_chans=config.num_channels , out_indices=__a , **__a , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
__lowerCAmelCase = self._backbone.return_layers
__lowerCAmelCase = {layer["module"]: str(__a ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(__a )
@classmethod
def snake_case ( cls , __a , *__a , **__a ):
requires_backends(cls , ["vision", "timm"] )
from ...models.timm_backbone import TimmBackboneConfig
__lowerCAmelCase = kwargs.pop("config" , TimmBackboneConfig() )
__lowerCAmelCase = kwargs.pop("use_timm_backbone" , __a )
if not use_timm:
raise ValueError("use_timm_backbone must be True for timm backbones" )
__lowerCAmelCase = kwargs.pop("num_channels" , config.num_channels )
__lowerCAmelCase = kwargs.pop("features_only" , config.features_only )
__lowerCAmelCase = kwargs.pop("use_pretrained_backbone" , config.use_pretrained_backbone )
__lowerCAmelCase = kwargs.pop("out_indices" , config.out_indices )
__lowerCAmelCase = TimmBackboneConfig(
backbone=__a , num_channels=__a , features_only=__a , use_pretrained_backbone=__a , out_indices=__a , )
return super()._from_config(__a , **__a )
def snake_case ( self , __a ):
pass
def snake_case ( self , __a , __a=None , __a=None , __a=None , **__a ):
__lowerCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
__lowerCAmelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowerCAmelCase = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError("Cannot output attentions for timm backbones at the moment" )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
__lowerCAmelCase = self._all_layers
__lowerCAmelCase = self._backbone(__a , **__a )
__lowerCAmelCase = self._return_layers
__lowerCAmelCase = tuple(hidden_states[i] for i in self.out_indices )
else:
__lowerCAmelCase = self._backbone(__a , **__a )
__lowerCAmelCase = None
__lowerCAmelCase = tuple(__a )
__lowerCAmelCase = tuple(__a ) if hidden_states is not None else None
if not return_dict:
__lowerCAmelCase = (feature_maps,)
if output_hidden_states:
__lowerCAmelCase = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=__a , hidden_states=__a , attentions=__a )
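# A minimal usage sketch, assuming the class above is transformers' TimmBackbone
# (requires `pip install timm`); weights are randomly initialized here to avoid
# a checkpoint download:
#
#     from transformers import TimmBackbone, TimmBackboneConfig
#     config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False)
#     backbone = TimmBackbone(config)
#     feature_maps = backbone(torch.randn(1, 3, 224, 224)).feature_maps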
| 57
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_altclip": [
"ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AltCLIPConfig",
"AltCLIPTextConfig",
"AltCLIPVisionConfig",
],
"processing_altclip": ["AltCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
"ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"AltCLIPPreTrainedModel",
"AltCLIPModel",
"AltCLIPTextModel",
"AltCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
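# With the lazy module in place, importing the package stays cheap: the heavy,
# torch-dependent classes are only materialized on first attribute access.
#
#     from transformers.models.altclip import AltCLIPConfig  # no torch import yet
#     from transformers.models.altclip import AltCLIPModel   # triggers the modeling import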
| 57
| 1
|
"""simple docstring"""
lowercase__ = tuple[float, float, float]
lowercase__ = tuple[float, float, float]
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Union[str, Any]:
a__: Union[str, Any] = end_pointa[0] - end_pointa[0]
a__: Union[str, Any] = end_pointa[1] - end_pointa[1]
a__: str = end_pointa[2] - end_pointa[2]
return (x, y, z)
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Dict:
a__: Any = ab[1] * ac[2] - ab[2] * ac[1] # *i
a__: Tuple = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j
a__: Any = ab[0] * ac[1] - ab[1] * ac[0] # *k
return (x, y, z)
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->int:
return tuple(round(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for x in vector ) == (0, 0, 0)
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 10 ) ->Tuple:
a__: Any = create_vector(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a__: Optional[int] = create_vector(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return is_zero_vector(get_ad_vectors_cross(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
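# A quick sanity check of the helpers above, added purely for illustration:
if __name__ == "__main__":
    print(are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2)))  # True: same line
    print(are_collinear((0, 0, 0), (1, 0, 0), (0, 1, 0)))  # False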
| 359
|
"""simple docstring"""
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def __a ( _SCREAMING_SNAKE_CASE ) ->Tuple:
a__: Tuple = {}
a__: Tuple = job['started_at']
a__: int = job['completed_at']
a__: Any = date_parser.parse(_SCREAMING_SNAKE_CASE )
a__: Tuple = date_parser.parse(_SCREAMING_SNAKE_CASE )
a__: str = round((end_datetime - start_datetime).total_seconds() / 60.0 )
a__: Any = start
a__: Dict = end
a__: Optional[int] = duration_in_min
return job_info
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) ->Optional[int]:
a__: Tuple = None
if token is not None:
a__: List[str] = {'Accept': 'application/vnd.github+json', 'Authorization': F'Bearer {token}'}
a__: int = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'
a__: Union[str, Any] = requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE ).json()
a__: str = {}
try:
job_time.update({job['name']: extract_time_from_single_job(_SCREAMING_SNAKE_CASE ) for job in result['jobs']} )
a__: Dict = math.ceil((result['total_count'] - 100) / 100 )
for i in range(_SCREAMING_SNAKE_CASE ):
a__: str = requests.get(url + F'&page={i + 2}' , headers=_SCREAMING_SNAKE_CASE ).json()
job_time.update({job['name']: extract_time_from_single_job(_SCREAMING_SNAKE_CASE ) for job in result['jobs']} )
return job_time
except Exception:
print(F'Unknown error, could not fetch links:\n{traceback.format_exc()}' )
return {}
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
lowercase__ = parser.parse_args()
lowercase__ = get_job_time(args.workflow_run_id)
lowercase__ = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(f"{k}: {v['duration']}")
| 203
| 0
|
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__lowerCAmelCase : Optional[int] =16
__lowerCAmelCase : Tuple =32
def UpperCamelCase ( _lowerCamelCase : Accelerator , _lowerCamelCase : DatasetDict , _lowerCamelCase : List[int] , _lowerCamelCase : List[int] , _lowerCamelCase : int = 16 ):
A__ = AutoTokenizer.from_pretrained("bert-base-cased" )
A__ = DatasetDict(
{
"train": dataset["train"].select(_lowerCamelCase ),
"validation": dataset["train"].select(_lowerCamelCase ),
"test": dataset["validation"],
} )
def tokenize_function(_lowerCamelCase : Dict ):
# max_length=None => use the model max length (it's actually the default)
A__ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=_lowerCamelCase , max_length=_lowerCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
A__ = datasets.map(
_lowerCamelCase , batched=_lowerCamelCase , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
A__ = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(_lowerCamelCase : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
A__ = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
A__ = 16
elif accelerator.mixed_precision != "no":
A__ = 8
else:
A__ = None
return tokenizer.pad(
_lowerCamelCase , padding="longest" , max_length=_lowerCamelCase , pad_to_multiple_of=_lowerCamelCase , return_tensors="pt" , )
# Instantiate dataloaders.
A__ = DataLoader(
tokenized_datasets["train"] , shuffle=_lowerCamelCase , collate_fn=_lowerCamelCase , batch_size=_lowerCamelCase )
A__ = DataLoader(
tokenized_datasets["validation"] , shuffle=_lowerCamelCase , collate_fn=_lowerCamelCase , batch_size=_lowerCamelCase )
A__ = DataLoader(
tokenized_datasets["test"] , shuffle=_lowerCamelCase , collate_fn=_lowerCamelCase , batch_size=_lowerCamelCase )
return train_dataloader, eval_dataloader, test_dataloader
def UpperCamelCase ( _lowerCamelCase : List[Any] , _lowerCamelCase : str ):
# New Code #
A__ = []
# Download the dataset
A__ = load_dataset("glue" , "mrpc" )
# Create our splits
A__ = StratifiedKFold(n_splits=int(args.num_folds ) )
# Initialize accelerator
A__ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A__ = config["lr"]
A__ = int(config["num_epochs"] )
A__ = int(config["seed"] )
A__ = int(config["batch_size"] )
A__ = evaluate.load("glue" , "mrpc" )
# If the batch size is too big we use gradient accumulation
A__ = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
A__ = batch_size // MAX_GPU_BATCH_SIZE
A__ = MAX_GPU_BATCH_SIZE
set_seed(_lowerCamelCase )
# New Code #
# Create our folds:
A__ = kfold.split(np.zeros(datasets["train"].num_rows ) , datasets["train"]["label"] )
A__ = []
# Iterate over them
for i, (train_idxs, valid_idxs) in enumerate(_lowerCamelCase ):
A__, A__, A__ = get_fold_dataloaders(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A__ = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=_lowerCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
A__ = model.to(accelerator.device )
# Instantiate optimizer
A__ = AdamW(params=model.parameters() , lr=_lowerCamelCase )
# Instantiate scheduler
A__ = get_linear_schedule_with_warmup(
optimizer=_lowerCamelCase , num_warmup_steps=1_00 , num_training_steps=(len(_lowerCamelCase ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A__, A__, A__, A__, A__ = accelerator.prepare(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Now we train the model
for epoch in range(_lowerCamelCase ):
model.train()
for step, batch in enumerate(_lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
A__ = model(**_lowerCamelCase )
A__ = outputs.loss
A__ = loss / gradient_accumulation_steps
accelerator.backward(_lowerCamelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
A__ = model(**_lowerCamelCase )
A__ = outputs.logits.argmax(dim=-1 )
A__, A__ = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=_lowerCamelCase , references=_lowerCamelCase , )
A__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"epoch {epoch}:" , _lowerCamelCase )
# New Code #
# We also run predictions on the test set at the very end
A__ = []
for step, batch in enumerate(_lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
A__ = model(**_lowerCamelCase )
A__ = outputs.logits
A__, A__ = accelerator.gather_for_metrics((predictions, batch["labels"]) )
fold_predictions.append(predictions.cpu() )
if i == 0:
# We need all of the test predictions
test_references.append(references.cpu() )
# Use accelerator.print to print only on the main process.
test_predictions.append(torch.cat(_lowerCamelCase , dim=0 ) )
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
A__ = torch.cat(_lowerCamelCase , dim=0 )
A__ = torch.stack(_lowerCamelCase , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
A__ = metric.compute(predictions=_lowerCamelCase , references=_lowerCamelCase )
accelerator.print("Average test metrics from all folds:" , _lowerCamelCase )
def UpperCamelCase ( ):
A__ = argparse.ArgumentParser(description="Simple example of training script." )
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). "
        "Bf16 requires PyTorch >= 1.10 and an Nvidia Ampere GPU.",
    )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
# New Code #
parser.add_argument("--num_folds" , type=_lowerCamelCase , default=3 , help="The number of splits to perform across the dataset" )
A__ = parser.parse_args()
A__ = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(_lowerCamelCase , _lowerCamelCase )
if __name__ == "__main__":
main()
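# Typical launches (assuming the script is saved as cross_validation.py, as in
# accelerate's `examples/by_feature` folder):
#
#     python cross_validation.py --num_folds 3
#     accelerate launch cross_validation.py --num_folds 5 --mixed_precision fp16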
| 237
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
"Acehnese Arabic": "ace_Arab",
"Acehnese Latin": "ace_Latn",
"Mesopotamian Arabic": "acm_Arab",
"Ta'izzi-Adeni Arabic": "acq_Arab",
"Tunisian Arabic": "aeb_Arab",
"Afrikaans": "afr_Latn",
"South Levantine Arabic": "ajp_Arab",
"Akan": "aka_Latn",
"Amharic": "amh_Ethi",
"North Levantine Arabic": "apc_Arab",
"Modern Standard Arabic": "arb_Arab",
"Modern Standard Arabic Romanized": "arb_Latn",
"Najdi Arabic": "ars_Arab",
"Moroccan Arabic": "ary_Arab",
"Egyptian Arabic": "arz_Arab",
"Assamese": "asm_Beng",
"Asturian": "ast_Latn",
"Awadhi": "awa_Deva",
"Central Aymara": "ayr_Latn",
"South Azerbaijani": "azb_Arab",
"North Azerbaijani": "azj_Latn",
"Bashkir": "bak_Cyrl",
"Bambara": "bam_Latn",
"Balinese": "ban_Latn",
"Belarusian": "bel_Cyrl",
"Bemba": "bem_Latn",
"Bengali": "ben_Beng",
"Bhojpuri": "bho_Deva",
"Banjar Arabic": "bjn_Arab",
"Banjar Latin": "bjn_Latn",
"Standard Tibetan": "bod_Tibt",
"Bosnian": "bos_Latn",
"Buginese": "bug_Latn",
"Bulgarian": "bul_Cyrl",
"Catalan": "cat_Latn",
"Cebuano": "ceb_Latn",
"Czech": "ces_Latn",
"Chokwe": "cjk_Latn",
"Central Kurdish": "ckb_Arab",
"Crimean Tatar": "crh_Latn",
"Welsh": "cym_Latn",
"Danish": "dan_Latn",
"German": "deu_Latn",
"Southwestern Dinka": "dik_Latn",
"Dyula": "dyu_Latn",
"Dzongkha": "dzo_Tibt",
"Greek": "ell_Grek",
"English": "eng_Latn",
"Esperanto": "epo_Latn",
"Estonian": "est_Latn",
"Basque": "eus_Latn",
"Ewe": "ewe_Latn",
"Faroese": "fao_Latn",
"Fijian": "fij_Latn",
"Finnish": "fin_Latn",
"Fon": "fon_Latn",
"French": "fra_Latn",
"Friulian": "fur_Latn",
"Nigerian Fulfulde": "fuv_Latn",
"Scottish Gaelic": "gla_Latn",
"Irish": "gle_Latn",
"Galician": "glg_Latn",
"Guarani": "grn_Latn",
"Gujarati": "guj_Gujr",
"Haitian Creole": "hat_Latn",
"Hausa": "hau_Latn",
"Hebrew": "heb_Hebr",
"Hindi": "hin_Deva",
"Chhattisgarhi": "hne_Deva",
"Croatian": "hrv_Latn",
"Hungarian": "hun_Latn",
"Armenian": "hye_Armn",
"Igbo": "ibo_Latn",
"Ilocano": "ilo_Latn",
"Indonesian": "ind_Latn",
"Icelandic": "isl_Latn",
"Italian": "ita_Latn",
"Javanese": "jav_Latn",
"Japanese": "jpn_Jpan",
"Kabyle": "kab_Latn",
"Jingpho": "kac_Latn",
"Kamba": "kam_Latn",
"Kannada": "kan_Knda",
"Kashmiri Arabic": "kas_Arab",
"Kashmiri Devanagari": "kas_Deva",
"Georgian": "kat_Geor",
"Central Kanuri Arabic": "knc_Arab",
"Central Kanuri Latin": "knc_Latn",
"Kazakh": "kaz_Cyrl",
"Kabiyè": "kbp_Latn",
"Kabuverdianu": "kea_Latn",
"Khmer": "khm_Khmr",
"Kikuyu": "kik_Latn",
"Kinyarwanda": "kin_Latn",
"Kyrgyz": "kir_Cyrl",
"Kimbundu": "kmb_Latn",
"Northern Kurdish": "kmr_Latn",
"Kikongo": "kon_Latn",
"Korean": "kor_Hang",
"Lao": "lao_Laoo",
"Ligurian": "lij_Latn",
"Limburgish": "lim_Latn",
"Lingala": "lin_Latn",
"Lithuanian": "lit_Latn",
"Lombard": "lmo_Latn",
"Latgalian": "ltg_Latn",
"Luxembourgish": "ltz_Latn",
"Luba-Kasai": "lua_Latn",
"Ganda": "lug_Latn",
"Luo": "luo_Latn",
"Mizo": "lus_Latn",
"Standard Latvian": "lvs_Latn",
"Magahi": "mag_Deva",
"Maithili": "mai_Deva",
"Malayalam": "mal_Mlym",
"Marathi": "mar_Deva",
"Minangkabau Arabic ": "min_Arab",
"Minangkabau Latin": "min_Latn",
"Macedonian": "mkd_Cyrl",
"Plateau Malagasy": "plt_Latn",
"Maltese": "mlt_Latn",
"Meitei Bengali": "mni_Beng",
"Halh Mongolian": "khk_Cyrl",
"Mossi": "mos_Latn",
"Maori": "mri_Latn",
"Burmese": "mya_Mymr",
"Dutch": "nld_Latn",
"Norwegian Nynorsk": "nno_Latn",
"Norwegian Bokmål": "nob_Latn",
"Nepali": "npi_Deva",
"Northern Sotho": "nso_Latn",
"Nuer": "nus_Latn",
"Nyanja": "nya_Latn",
"Occitan": "oci_Latn",
"West Central Oromo": "gaz_Latn",
"Odia": "ory_Orya",
"Pangasinan": "pag_Latn",
"Eastern Panjabi": "pan_Guru",
"Papiamento": "pap_Latn",
"Western Persian": "pes_Arab",
"Polish": "pol_Latn",
"Portuguese": "por_Latn",
"Dari": "prs_Arab",
"Southern Pashto": "pbt_Arab",
"Ayacucho Quechua": "quy_Latn",
"Romanian": "ron_Latn",
"Rundi": "run_Latn",
"Russian": "rus_Cyrl",
"Sango": "sag_Latn",
"Sanskrit": "san_Deva",
"Santali": "sat_Olck",
"Sicilian": "scn_Latn",
"Shan": "shn_Mymr",
"Sinhala": "sin_Sinh",
"Slovak": "slk_Latn",
"Slovenian": "slv_Latn",
"Samoan": "smo_Latn",
"Shona": "sna_Latn",
"Sindhi": "snd_Arab",
"Somali": "som_Latn",
"Southern Sotho": "sot_Latn",
"Spanish": "spa_Latn",
"Tosk Albanian": "als_Latn",
"Sardinian": "srd_Latn",
"Serbian": "srp_Cyrl",
"Swati": "ssw_Latn",
"Sundanese": "sun_Latn",
"Swedish": "swe_Latn",
"Swahili": "swh_Latn",
"Silesian": "szl_Latn",
"Tamil": "tam_Taml",
"Tatar": "tat_Cyrl",
"Telugu": "tel_Telu",
"Tajik": "tgk_Cyrl",
"Tagalog": "tgl_Latn",
"Thai": "tha_Thai",
"Tigrinya": "tir_Ethi",
"Tamasheq Latin": "taq_Latn",
"Tamasheq Tifinagh": "taq_Tfng",
"Tok Pisin": "tpi_Latn",
"Tswana": "tsn_Latn",
"Tsonga": "tso_Latn",
"Turkmen": "tuk_Latn",
"Tumbuka": "tum_Latn",
"Turkish": "tur_Latn",
"Twi": "twi_Latn",
"Central Atlas Tamazight": "tzm_Tfng",
"Uyghur": "uig_Arab",
"Ukrainian": "ukr_Cyrl",
"Umbundu": "umb_Latn",
"Urdu": "urd_Arab",
"Northern Uzbek": "uzn_Latn",
"Venetian": "vec_Latn",
"Vietnamese": "vie_Latn",
"Waray": "war_Latn",
"Wolof": "wol_Latn",
"Xhosa": "xho_Latn",
"Eastern Yiddish": "ydd_Hebr",
"Yoruba": "yor_Latn",
"Yue Chinese": "yue_Hant",
"Chinese Simplified": "zho_Hans",
"Chinese Traditional": "zho_Hant",
"Standard Malay": "zsm_Latn",
"Zulu": "zul_Latn",
}
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
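# A minimal usage sketch via the agents API (assuming the tool is registered
# under the "translation" task and that the model weights can be downloaded):
#
#     from transformers import load_tool
#     translator = load_tool("translation")
#     print(translator("Bonjour à tous", src_lang="French", tgt_lang="English"))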
| 237
| 1
|
"""simple docstring"""
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[Any]:
if height >= 1:
move_tower(height - 1 , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
move_disk(__UpperCamelCase , __UpperCamelCase )
move_tower(height - 1 , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> str:
print("""moving disk from""" , __UpperCamelCase , """to""" , __UpperCamelCase )
def _lowerCamelCase() -> Optional[Any]:
_lowerCAmelCase =int(input("""Height of hanoi: """ ).strip() )
move_tower(__UpperCamelCase , """A""" , """B""" , """C""" )
if __name__ == "__main__":
main()
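# For example, a height of 2 prints:
#     moving disk from A to C
#     moving disk from A to B
#     moving disk from C to B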
| 341
|
"""simple docstring"""
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> int:
return int((input_a, input_a).count(1 ) != 0 )
def _lowerCamelCase() -> None:
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
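# The same tuple-counting trick yields the other basic gates; for instance, a
# purely illustrative sketch of an AND gate:
def and_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(0) == 0)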
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 341
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}


class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=50_265, max_position_embeddings=1_024, encoder_layers=12, encoder_ffn_dim=4_096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4_096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=1_024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=0, scale_embedding=False, pad_token_id=0, eos_token_id=1, forced_eos_token_id=1, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
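# A quick demonstration of the attribute indirection above: generic code can read
# `hidden_size` and `num_attention_heads` from this encoder-decoder config.
if __name__ == "__main__":
    config = PegasusConfig(encoder_layers=2, decoder_layers=2, d_model=256)
    print(config.hidden_size)  # 256, resolved to d_model
    print(config.num_attention_heads)  # 16, resolved to encoder_attention_heads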
| 281
|
snake_case : Optional[int] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def lowerCAmelCase_ ( _snake_case : bytes ) -> bytes:
'''simple docstring'''
if not isinstance(_snake_case , _snake_case ):
__magic_name__ : Tuple = F'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(_snake_case )
__magic_name__ : Optional[int] = "".join(bin(_snake_case )[2:].zfill(8 ) for byte in data )
__magic_name__ : List[Any] = len(_snake_case ) % 6 != 0
if padding_needed:
# The padding that will be added later
__magic_name__ : List[str] = B"=" * ((6 - len(_snake_case ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(_snake_case ) % 6)
else:
__magic_name__ : List[str] = B""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(_snake_case ) , 6 ) ).encode()
+ padding
)
def lowerCAmelCase_ ( _snake_case : str ) -> bytes:
'''simple docstring'''
if not isinstance(_snake_case , _snake_case ) and not isinstance(_snake_case , _snake_case ):
__magic_name__ : List[str] = (
"argument should be a bytes-like object or ASCII string, "
F'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(_snake_case )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(_snake_case , _snake_case ):
try:
__magic_name__ : List[Any] = encoded_data.decode("utf-8" )
except UnicodeDecodeError:
raise ValueError("base64 encoded data should only contain ASCII characters" )
__magic_name__ : List[str] = encoded_data.count("=" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(_snake_case ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
__magic_name__ : Optional[int] = encoded_data[:-padding]
__magic_name__ : Dict = "".join(
bin(B64_CHARSET.index(_snake_case ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
__magic_name__ : Union[str, Any] = "".join(
bin(B64_CHARSET.index(_snake_case ) )[2:].zfill(6 ) for char in encoded_data )
__magic_name__ : List[Any] = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(_snake_case ) , 8 )
]
return bytes(_snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
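# Round-trip sanity check against the standard library (a quick sketch):
#     import base64
#     assert base64_encode(b"Hello World!") == base64.b64encode(b"Hello World!")
#     assert base64_decode("SGVsbG8gV29ybGQh") == b"Hello World!"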
| 281
| 1
|
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are
        # decoded. Must be tested with an actual model: the dummy models' tokenizers are not
        # aligned with their models, so `skip_special_tokens=True` has no effect on them.
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
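
# For reference, the streaming pattern exercised above looks like this in
# application code (a sketch; `model`, `tokenizer` and `input_ids` are assumed
# to be set up as in the tests):
#
#   streamer = TextIteratorStreamer(tokenizer)
#   thread = Thread(
#       target=model.generate,
#       kwargs={"input_ids": input_ids, "max_new_tokens": 50, "streamer": streamer},
#   )
#   thread.start()
#   for new_text in streamer:
#       print(new_text, end="", flush=True)  # chunks arrive as they are generated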
def hamming_distance(string1: str, string2: str) -> int:
    """Count the number of positions at which two equal-length strings differ."""
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")

    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
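
# Usage sketch (added): the classic example "karolin" vs. "kathrin" differs at
# three positions, so the Hamming distance is 3.
assert hamming_distance("karolin", "kathrin") == 3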
"""simple docstring"""
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'split_dict' , [
SplitDict(),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 , dataset_name='my_dataset' )} ),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 )} ),
SplitDict({'train': SplitInfo()} ),
] , )
def a_ ( _lowerCAmelCase : SplitDict ):
'''simple docstring'''
lowercase__ : List[str] = split_dict._to_yaml_list()
assert len(_lowerCamelCase ) == len(_lowerCamelCase )
lowercase__ : Any = SplitDict._from_yaml_list(_lowerCamelCase )
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
lowercase__ : List[Any] = None
# the split name of split_dict takes over the name of the split info object
lowercase__ : Dict = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
'split_info' , [SplitInfo(), SplitInfo(dataset_name=_lowerCamelCase ), SplitInfo(dataset_name='my_dataset' )] )
def a_ ( _lowerCAmelCase : List[str] ):
'''simple docstring'''
lowercase__ : Optional[Any] = asdict(SplitDict({'train': split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
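
# Round-trip sketch (added for illustration; mirrors the first test above):
#   splits = SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)})
#   assert SplitDict._from_yaml_list(splits._to_yaml_list()) == splits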
from __future__ import annotations


def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    """Solve the fractional knapsack problem greedily by value/weight ratio."""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
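
# Worked example (added for illustration): items with values [10, 20, 30] and
# weights [1, 2, 3] under capacity 5 -- all of items 0 and 1 fit, then 2/3 of
# item 2, for a total value of 50.
assert fractional_knapsack([10, 20, 30], [1, 2, 3], 5) == (50.0, [1, 1, 2 / 3])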
import argparse
import logging
import os
from datetime import datetime

import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm

from transformers import GPT2LMHeadModel


logger = logging.getLogger(__name__)


def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)


def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution along its last axis."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
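
# Illustrative check (added; not part of the original script): a uniform
# distribution over four outcomes has entropy ln(4) ~= 1.386, e.g.
#   entropy(torch.full((4,), 0.25))  # -> tensor(1.3863)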
def print_2d_tensor(tensor):
    """Print out a 2D tensor, one logged line per layer."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))


def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute the head attention entropy and the head importance scores
    according to http://arxiv.org/abs/1905.10650
    """
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)

    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Mask heads (set some heads to zero) iteratively, based on the head
    importance scores, until the score drops below the masking threshold.
    """
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Prune heads (actually remove the masked weights) and compare the
    resulting score and speed against masking.
    """
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )

    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )

    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")

    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)


if __name__ == "__main__":
    main()
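
# A typical invocation might look like the following (script name and paths are
# placeholders, not taken from the original):
#
#   python run_prune_gpt.py \
#       --model_name_or_path gpt2 \
#       --data_dir data/token_ids.txt \
#       --output_dir out/pruned \
#       --try_masking --masking_threshold 0.9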
from __future__ import annotations

import unittest

from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel


@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
import argparse
import json

from tqdm import tqdm


def main():
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")


if __name__ == "__main__":
    main()
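
# For illustration (record shape inferred from the fields read above): a DPR
# record such as
#   {"question": "who wrote hamlet", "positive_ctxs": [{"title": "Hamlet"}]}
# yields the line "who wrote hamlet" in the evaluation set and the tab-joined
# positive-context titles ("Hamlet") in the gold data file.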
"""simple docstring"""
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
__snake_case = logging.get_logger(__name__)
def __lowerCAmelCase ( ) -> str:
"""simple docstring"""
snake_case : Dict = os.getenv("SM_HP_MP_PARAMETERS" , "{}" )
try:
# Parse it and check the field "partitions" is included, it is required for model parallel.
snake_case : Optional[int] = json.loads(lowercase )
if "partitions" not in smp_options:
return False
except json.JSONDecodeError:
return False
# Get the sagemaker specific framework parameters from mpi_options variable.
snake_case : Optional[int] = os.getenv("SM_FRAMEWORK_PARAMS" , "{}" )
try:
# Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
snake_case : Any = json.loads(lowercase )
if not mpi_options.get("sagemaker_mpi_enabled" , lowercase ):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec("smdistributed" ) is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class _lowerCAmelCase ( snake_case_ ):
__UpperCAmelCase : str = field(
default='''''' , metadata={'''help''': '''Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'''} , )
def lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
super().__post_init__()
warnings.warn(
"`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
"`TrainingArguments` instead." , UpperCamelCase__ , )
@cached_property
def lowerCamelCase ( self ) -> "torch.device":
'''simple docstring'''
logger.info("PyTorch: setting up devices" )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
"torch.distributed process group is initialized, but local_rank == -1. "
"In order to use Torch DDP, launch your script with `python -m torch.distributed.launch" )
if self.no_cuda:
snake_case : Optional[Any] = torch.device("cpu" )
snake_case : List[Any] = 0
elif is_sagemaker_model_parallel_available():
snake_case : Tuple = smp.local_rank()
snake_case : int = torch.device("cuda" , UpperCamelCase__ )
snake_case : Dict = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend="smddp" , timeout=self.ddp_timeout_delta )
snake_case : Any = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK" ) )
snake_case : Optional[Any] = torch.device("cuda" , self.local_rank )
snake_case : str = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
snake_case : List[str] = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
snake_case : Optional[Any] = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend="nccl" , timeout=self.ddp_timeout_delta )
snake_case : Any = torch.device("cuda" , self.local_rank )
snake_case : Dict = 1
if device.type == "cuda":
torch.cuda.set_device(UpperCamelCase__ )
return device
@property
def lowerCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
return not is_sagemaker_model_parallel_available()
@property
def lowerCamelCase ( self ) -> str:
'''simple docstring'''
return False
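
# Usage sketch (added; `Trainer`, `model`, `train_dataset` and the output path
# are placeholders, not part of this module):
#   args = SageMakerTrainingArguments(output_dir="/opt/ml/model")
#   trainer = Trainer(model=model, args=args, train_dataset=train_dataset)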
"""simple docstring"""
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = [0] * len(lowerCAmelCase__ )
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = 0
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(lowerCAmelCase__ ) ):
if indegree[i] == 0:
queue.append(lowerCAmelCase__ )
while queue:
UpperCAmelCase_ = queue.pop(0 )
cnt += 1
topo.append(lowerCAmelCase__ )
for x in graph[vertex]:
indegree[x] -= 1
if indegree[x] == 0:
queue.append(lowerCAmelCase__ )
if cnt != len(lowerCAmelCase__ ):
print("Cycle exists" )
else:
print(lowerCAmelCase__ )
# Adjacency List of Graph
lowerCamelCase = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
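
# Cycle detection sketch (added for illustration): feeding a graph with a
# cycle (0 -> 1 -> 2 -> 0) leaves every vertex with nonzero in-degree, so
# Kahn's algorithm processes nothing and reports the cycle.
cyclic_graph = {0: [1], 1: [2], 2: [0]}
topological_sort(cyclic_graph)  # prints "Cycle exists"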
"""simple docstring"""
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
return [sentence[i : i + ngram_size] for i in range(len(lowerCAmelCase__ ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod()
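
# Usage sketch (added): the character trigrams of "hello".
assert create_ngram("hello", 3) == ["hel", "ell", "llo"]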