"""simple docstring"""
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : List[str] = ["""image_processor""", """tokenizer"""]
_A : Tuple = """BridgeTowerImageProcessor"""
_A : str = ("""RobertaTokenizer""", """RobertaTokenizerFast""")
def __init__(self , lowercase__ , lowercase__ ):
super().__init__(lowercase__ , lowercase__ )
def __call__(self , lowercase__ , lowercase__ = None , lowercase__ = True , lowercase__ = False , lowercase__ = None , lowercase__ = None , lowercase__ = 0 , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = False , lowercase__ = False , lowercase__ = False , lowercase__ = False , lowercase__ = True , lowercase__ = None , **lowercase__ , ):
snake_case_ : Optional[int] = self.tokenizer(
text=lowercase__ , add_special_tokens=lowercase__ , padding=lowercase__ , truncation=lowercase__ , max_length=lowercase__ , stride=lowercase__ , pad_to_multiple_of=lowercase__ , return_token_type_ids=lowercase__ , return_attention_mask=lowercase__ , return_overflowing_tokens=lowercase__ , return_special_tokens_mask=lowercase__ , return_offsets_mapping=lowercase__ , return_length=lowercase__ , verbose=lowercase__ , return_tensors=lowercase__ , **lowercase__ , )
# add pixel_values + pixel_mask
snake_case_ : List[Any] = self.image_processor(
lowercase__ , return_tensors=lowercase__ , do_normalize=lowercase__ , do_center_crop=lowercase__ , **lowercase__ )
encoding.update(lowercase__ )
return encoding
def __UpperCamelCase (self , *lowercase__ , **lowercase__ ):
return self.tokenizer.batch_decode(*lowercase__ , **lowercase__ )
def __UpperCamelCase (self , *lowercase__ , **lowercase__ ):
return self.tokenizer.decode(*lowercase__ , **lowercase__ )
@property
def __UpperCamelCase (self ):
snake_case_ : Tuple = self.tokenizer.model_input_names
snake_case_ : List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
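
# Illustrative usage sketch (not part of the original file; the checkpoint name
# below is an assumption). A single processor call tokenizes the text and
# preprocesses the image, returning one combined encoding:
#
#     from transformers import BridgeTowerProcessor
#     from PIL import Image
#
#     processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
#     image = Image.open("cats.jpg")
#     encoding = processor(image, "two cats sleeping", return_tensors="pt")
#     # encoding now holds input_ids/attention_mask from the tokenizer
#     # plus pixel_values/pixel_mask from the image processor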
"""simple docstring"""
a_ = 256
# Modulus to hash a string
a_ = 1000003
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ):
"""simple docstring"""
snake_case_ : str = len(SCREAMING_SNAKE_CASE__ )
snake_case_ : int = len(SCREAMING_SNAKE_CASE__ )
if p_len > t_len:
return False
snake_case_ : str = 0
snake_case_ : Dict = 0
snake_case_ : List[str] = 1
# Calculating the hash of pattern and substring of text
for i in range(SCREAMING_SNAKE_CASE__ ):
snake_case_ : Optional[int] = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
snake_case_ : int = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
snake_case_ : int = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
snake_case_ : List[str] = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def SCREAMING_SNAKE_CASE__ ( ):
"""simple docstring"""
snake_case_ : Optional[int] = """abc1abc12"""
snake_case_ : Any = """alskfjaldsabc1abc1abc12k23adsfabcabc"""
snake_case_ : List[str] = """alskfjaldsk23adsfabcabc"""
assert rabin_karp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and not rabin_karp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Test 2)
snake_case_ : Union[str, Any] = """ABABX"""
snake_case_ : Optional[Any] = """ABABZABABYABABX"""
assert rabin_karp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Test 3)
snake_case_ : Union[str, Any] = """AAAB"""
snake_case_ : Union[str, Any] = """ABAAAAAB"""
assert rabin_karp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Test 4)
snake_case_ : Optional[Any] = """abcdabcy"""
snake_case_ : Union[str, Any] = """abcxabcdabxabcdabcdabcy"""
assert rabin_karp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Test 5)
snake_case_ : List[str] = """Lü"""
snake_case_ : Optional[int] = """Lüsai"""
assert rabin_karp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case_ : Any = """Lue"""
assert not rabin_karp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
print("""Success.""" )
if __name__ == "__main__":
test_rabin_karp()
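
# A minimal cross-check of the implementation above against Python's built-in
# substring search (a sketch, not from the original file; random strings over a
# two-letter alphabet are an illustrative choice):
#
#     import random
#
#     for _ in range(1000):
#         text = "".join(random.choices("ab", k=20))
#         pattern = "".join(random.choices("ab", k=3))
#         assert rabin_karp(pattern, text) == (pattern in text)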
"""Tests for the Flax diffusion pipelines."""
import os
import tempfile
import unittest

import numpy as np

from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow


if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard

    from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline


@require_flax
class DownloadTests(unittest.TestCase):
    def test_download_only_pytorch(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )
            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))]
            files = [item for sublist in all_root_files for item in sublist]
            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)


@slow
@require_flax
class FlaxPipelineTests(unittest.TestCase):
    def test_dummy_all_tpus(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
        )
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1

        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
        assert len(images_pil) == num_samples

    def test_stable_diffusion_v1_4(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None
        )
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2383808.2)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
        )
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_with_safety(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
        )
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", set_alpha_to_one=False, steps_offset=1
        )
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, scheduler=scheduler, safety_checker=None
        )
        scheduler_state = scheduler.create_state()
        params["scheduler"] = scheduler_state

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2347693.5)) < 5e-1

    def test_jax_memory_efficient_attention(self):
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
        )
        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        slice = images[2, 0, 256, 10:17, 1]

        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None, use_memory_efficient_attention=True
        )
        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        slice_eff = images_eff[2, 0, 256, 10:17, 1]

        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice).max() < 1e-2
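
# The tests above all follow the same data-parallel recipe: `replicate` copies
# the parameters to every device, `jax.random.split` gives each device its own
# RNG key, and `shard` adds a leading device axis to the batch so the jitted
# (pmapped) pipeline runs one slice per device. A minimal sketch of the shapes
# involved (illustrative, not from the original file):
#
#     n = jax.device_count()
#     batch = jnp.zeros((n * 2, 77))                      # global batch of token ids
#     batch = shard(batch)                                # -> shape (n, 2, 77)
#     rngs = jax.random.split(jax.random.PRNGKey(0), n)   # one key per device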
"""Tests for the PyTorch LayoutLMv3 model."""
import copy
import unittest

from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        LayoutLMv3ForQuestionAnswering,
        LayoutLMv3ForSequenceClassification,
        LayoutLMv3ForTokenClassification,
        LayoutLMv3Model,
    )
    from transformers.models.layoutlmv3.modeling_layoutlmv3 import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor


class LayoutLMv3ModelTester:
    def __init__(self, parent, batch_size=2, num_channels=3, image_size=4, patch_size=2, text_seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=36, num_hidden_layers=3, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, coordinate_size=6, shape_size=6, num_labels=3, num_choices=4, scope=None, range_bbox=1000):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMv3Config(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size)

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LayoutLMv3Model(config=config)
        model.to(torch_device)
        model.eval()

        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size))

        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LayoutLMv3ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class LayoutLMv3ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMv3Model,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3ForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMv3ForQuestionAnswering, "feature-extraction": LayoutLMv3Model}
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
        # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
        # the sequence dimension of the text embedding only.
        # (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
        return True

    def setUp(self):
        self.model_tester = LayoutLMv3ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
                inputs_dict["end_positions"] = torch.zeros(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros((self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=torch.long, device=torch_device)
        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMv3Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
class LayoutLMv3ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMv3ImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = LayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)

        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device), bbox=bbox.to(torch_device), pixel_values=pixel_values.to(torch_device)
        )

        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
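
# The sequence length asserted above follows directly from the comment in the
# model tester: text tokens + image patches + 1 CLS token. For the base
# checkpoint (224x224 images, 16x16 patches) and the 2 text tokens used here:
#
#     text_tokens = 2
#     patches = (224 // 16) ** 2                # 196
#     seq_length = text_tokens + patches + 1    # = 199, matching torch.Size((1, 199, 768))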
from dataclasses import dataclass
from typing import Dict, Optional, Union

import torch
import torch.nn.functional as F
from torch import nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin


@dataclass
class PriorTransformerOutput(BaseOutput):
    """Output of PriorTransformer: the predicted CLIP image embedding."""

    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
    """A prior transformer that predicts a CLIP image embedding from text embeddings."""

    @register_to_config
    def __init__(self, num_attention_heads: int = 32, attention_head_dim: int = 64, num_layers: int = 20, embedding_dim: int = 768, num_embeddings=77, additional_embeddings=4, dropout: float = 0.0, time_embed_act_fn: str = "silu", norm_in_type: Optional[str] = None, embedding_proj_norm_type: Optional[str] = None, encoder_hid_proj_type: Optional[str] = "linear", added_emb_type: Optional[str] = "prd", time_embed_dim: Optional[int] = None, embedding_proj_dim: Optional[int] = None, clip_embed_dim: Optional[int] = None):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`."
            )

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, activation_fn="gelu", attention_bias=True)
                for d in range(num_layers)
            ]
        )

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)
        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0
        )
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        # Returns all attention processors used in the model, indexed by weight name.
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)
        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        # Sets the attention processor to use: one processor for all layers, or a dict keyed by layer name.
        count = len(self.attn_processors.keys())
        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        # Disables custom attention processors and restores the default implementation.
        self.set_attn_processor(AttnProcessor())

    def forward(self, hidden_states, timestep: Union[torch.Tensor, float, int], proj_embedding: torch.FloatTensor, encoder_hidden_states: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.BoolTensor] = None, return_dict: bool = True):
        batch_size = hidden_states.shape[0]
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)

        timesteps_projected = self.time_proj(timesteps)
        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)
        proj_embeddings = self.embedding_proj(proj_embedding)

        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")

        hidden_states = self.proj_in(hidden_states)
        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        additional_embeds = []
        additional_embeddings_len = 0
        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]

        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]
        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]

        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]

        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)

        hidden_states = torch.cat(additional_embeds, dim=1)

        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )

        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)

        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        hidden_states = self.norm_out(hidden_states)

        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]

        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

        if not return_dict:
            return (predicted_image_embedding,)
        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)

    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
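
# A minimal instantiation sketch (illustrative, not from the original file; the
# shapes follow the defaults above: embedding_dim=768, num_embeddings=77,
# additional_embeddings=4):
#
#     prior = PriorTransformer()
#     hidden_states = torch.randn(1, 768)              # noisy image embedding being denoised
#     proj_embedding = torch.randn(1, 768)             # pooled text embedding
#     encoder_hidden_states = torch.randn(1, 77, 768)  # per-token text states
#     out = prior(hidden_states, 10, proj_embedding, encoder_hidden_states)
#     # out.predicted_image_embedding has shape (1, 768)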
"""Project Euler 145: count reversible numbers (n + reverse(n) has only odd digits)."""
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length)
    return result


def solution(max_power: int = 9) -> int:
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
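
# Quick brute-force cross-check for small sizes (a sketch, not from the original
# file; the expected count of 120 reversible numbers below one thousand comes
# from the Project Euler 145 problem statement):
#
#     def is_reversible(n: int) -> bool:
#         if n % 10 == 0:          # reversed number would have a leading zero
#             return False
#         return all(int(d) % 2 == 1 for d in str(n + int(str(n)[::-1])))
#
#     assert sum(is_reversible(n) for n in range(1, 1000)) == 120 == solution(3)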
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vivit"] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]

if TYPE_CHECKING:
    from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_vivit import VivitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vivit import (
            VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            VivitForVideoClassification,
            VivitModel,
            VivitPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
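
# Usage note (a sketch, not from the original file): with this lazy-module
# pattern, importing the package is cheap; the heavy torch/vision submodules
# are only imported when an attribute is first accessed, e.g.:
#
#     from transformers.models.vivit import VivitConfig   # triggers the lazy load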
import argparse

import pytorch_lightning as pl
import torch
from torch import nn

from transformers import LongformerForQuestionAnswering, LongformerModel


class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because Lightning requires it
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path to the official PyTorch Lightning Checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
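
# Example invocation (illustrative; the script filename and checkpoint path are
# placeholders, not confirmed by this file):
#
#     python convert_longformer_qa_checkpoint.py \
#         --longformer_model longformer-base-4096 \
#         --longformer_question_answering_ckpt_path ./checkpoint.ckpt \
#         --pytorch_dump_folder_path ./longformer-base-4096-qa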
import warnings

from ..trainer import Trainer
from ..utils import logging


logger = logging.get_logger(__name__)


class SageMakerTrainer(Trainer):
    """Deprecated thin wrapper around `Trainer`, kept for backward compatibility."""

    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
from ..utils import DummyObject, requires_backends


class MidiProcessor(metaclass=DummyObject):
    """Placeholder object raising an informative error when `note_seq` is not installed."""

    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
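
# Behavior sketch (illustrative, not from the original file): because of the
# DummyObject metaclass, using the class without the optional backend raises a
# helpful ImportError instead of failing at import time:
#
#     MidiProcessor()   # raises an ImportError asking to install note_seq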
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(self, vocab_size=32000, hidden_size=4096, intermediate_size=11008, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=None, hidden_act="silu", max_position_embeddings=2048, initializer_range=0.02, rms_norm_eps=1e-6, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, pretraining_tp=1, tie_word_embeddings=False, rope_scaling=None, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
import json
import pathlib
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DetrImageProcessor


class DetrImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_pad=True):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        # Computes the expected (height, width) after DetrImageProcessor resizing,
        # for a single image or, with batched=True, the padded maxima over a batch.
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "rescale_factor"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_pad"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
"""simple docstring"""
def lowercase ( UpperCamelCase : int = 1000000 ):
"""simple docstring"""
A__ : Optional[Any] =set(range(3 , UpperCamelCase , 2 ) )
primes.add(2 )
for p in range(3 , UpperCamelCase , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , UpperCamelCase , UpperCamelCase ) ) )
A__ : int =[float(UpperCamelCase ) for n in range(limit + 1 )]
for p in primes:
for n in range(UpperCamelCase , limit + 1 , UpperCamelCase ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(f"""{solution() = }""")
"""simple docstring"""
def lowercase ( UpperCamelCase : int ):
"""simple docstring"""
if num <= 0:
raise ValueError("Input must be a positive integer" )
A__ : Union[str, Any] =[True] * (num + 1)
A__ : Union[str, Any] =2
while p * p <= num:
if primes[p]:
for i in range(p * p , num + 1 , UpperCamelCase ):
A__ : str =False
p += 1
return [prime for prime in range(2 , num + 1 ) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
__A : Optional[int] = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
'''simple docstring'''
from __future__ import annotations
def _A ( A ,A ,A ,) -> tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
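A quick illustration of the mass-action relation n·p = n_i² that the function encodes (the numbers are made up for the example):

print(carrier_concentration(electron_conc=25, hole_conc=0, intrinsic_conc=10))
# ('hole_conc', 4.0)  ->  10**2 / 25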
| 425
|
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """Build a map of TF variable names to PyTorch parameters."""
    tf_to_pt_map = {}

    if isinstance(model, MobileNetVaForImageClassification):
        backbone = model.mobilenet_va
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetVaForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_va(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints into a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading TensorFlow models in PyTorch requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features, conv_layer) -> torch.Tensor:
    # Apply TensorFlow-style "SAME" padding for the given convolution layer.
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
class MobileNetVaConvLayer(nn.Module):
    def __init__(
        self,
        config,
        in_channels,
        out_channels,
        kernel_size,
        stride=1,
        groups=1,
        bias=False,
        use_normalization=True,
        use_activation=True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACTaFN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACTaFN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetVaPreTrainedModel(PreTrainedModel):
    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module) -> None:
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
lowerCAmelCase : Optional[Any] = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
lowerCAmelCase : Union[str, Any] = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaModel(MobileNetVaPreTrainedModel):
    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetVaConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            # depthwise 3x3 convolution
            self.layer.append(
                MobileNetVaConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )
            # pointwise 1x1 convolution
            self.layer.append(
                MobileNetVaConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                )
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )
@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaForImageClassification(MobileNetVaPreTrainedModel):
    def __init__(self, config) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_va = MobileNetVaModel(config)

        last_hidden_size = self.mobilenet_va.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values=None,
        output_hidden_states=None,
        labels=None,
        return_dict=None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_va(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
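The head above corresponds to transformers' MobileNetV1ForImageClassification; a minimal inference sketch against the public checkpoint named in this file's docstring constants (the image URL is the usual COCO sample used in the docs):

# Sketch only, assuming network access and the transformers public API.
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, MobileNetV1ForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])  # e.g. "tabby, tabby cat"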
| 425
| 1
|
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
'B': [['C', 1]],
'C': [['D', 1]],
'D': [['F', 1]],
'E': [['B', 1], ['G', 2]],
'F': [],
'G': [['F', 1]],
}
graph_bwd = {
'B': [['E', 1]],
'C': [['B', 1]],
'D': [['C', 1]],
'F': [['D', 1], ['G', 1]],
'E': [[None, np.inf]],
'G': [['E', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
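A quick run on the graphs defined above (graph_fwd holds the forward edges, graph_bwd the same edges reversed; names follow the reconstruction above). The shortest E-to-F route is E-G-F with total weight 3:

print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))  # 3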
| 665
|
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
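A quick driver for the recursive search above; negative indices inside the valid range are also accepted:

print(find_max([2, 8, 3, 1, 5], 0, 4))  # 8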
| 666
| 0
|
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase):
    '''simple docstring'''
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
    def test_small_model_pt(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="pt")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                        " oscope. FiliFili@@"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"])
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                            " oscope. FiliFili@@"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
                            " oscope. oscope. FiliFili@@"
                        )
                    }
                ],
            ],
        )

        outputs = text_generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(list)},
                {"generated_token_ids": ANY(list)},
            ],
        )

        text_generator.tokenizer.pad_token_id = text_generator.model.config.eos_token_id
        text_generator.tokenizer.pad_token = "<pad>"
        outputs = text_generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
            ],
        )
@require_tf
    def test_small_model_tf(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="tf")

        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                        " please,"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"], do_sample=False)
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                            " please,"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
                            " Cannes 閲閲Cannes Cannes Cannes 攵 please,"
                        )
                    }
                ],
            ],
        )
    def get_test_pipeline(self, model, tokenizer, processor):
        text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return text_generator, ["This is a test", "Another test"]
    def test_stop_sequence_stopping_criteria(self):
        prompt = "Hello I believe in"
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        output = text_generator(prompt)
        self.assertEqual(
            output,
            [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}],
        )

        output = text_generator(prompt, stop_sequence=" fe")
        self.assertEqual(output, [{"generated_text": "Hello I believe in fe"}])
    def run_pipeline_test(self, text_generator, _):
        model = text_generator.model
        tokenizer = text_generator.tokenizer

        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator("This is a test", return_full_text=False)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        text_generator = pipeline(task="text-generation", model=model, tokenizer=tokenizer, return_full_text=False)
        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        outputs = text_generator("This is a test", return_full_text=True)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        if text_generator.tokenizer.pad_token is not None:
            outputs = text_generator(
                ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
            )
            self.assertEqual(
                outputs,
                [
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                ],
            )

        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_text=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_tensors=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_text=True, return_tensors=True)

        # Empty prompt is slightly special
        # it requires BOS token to exist.
        # Special case for Pegasus which will always append EOS so will
        # work even without BOS.
        if (
            text_generator.tokenizer.bos_token_id is not None
            or "Pegasus" in tokenizer.__class__.__name__
            or "Git" in model.__class__.__name__
        ):
            outputs = text_generator("")
            self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        else:
            with self.assertRaises((ValueError, AssertionError)):
                outputs = text_generator("")

        if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and it's impossible
            # to control long generation with only max_length without
            # fancy calculation, dismissing tests for now.
            return

        # We don't care about infinite range models.
        # They already work.
        # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
        if (
            tokenizer.model_max_length < 10_000
            and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
        ):
            # Handling of large generations
            with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError)):
                text_generator("This is a test" * 500, max_new_tokens=20)

            outputs = text_generator("This is a test" * 500, handle_long_generation="hole", max_new_tokens=20)
            # Hole strategy cannot work
            with self.assertRaises(ValueError):
                text_generator(
                    "This is a test" * 500,
                    handle_long_generation="hole",
                    max_new_tokens=tokenizer.model_max_length + 10,
                )
    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_small_model_pt_bloom_accelerate(self):
        import torch

        # Classic `model_kwargs`
        pipe = pipeline(
            model="hf-internal-testing/tiny-random-bloom",
            model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloat16},
        )
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.bfloat16)
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto")
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.float32)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )
    @require_torch
    @require_torch_gpu
    def test_small_model_fp16(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device=0, torch_dtype=torch.float16)
        pipe("This is a test")

    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_pipeline_accelerate_top_p(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.float16)
        pipe("This is a test", do_sample=True, top_p=0.5)
    def test_warning_logs(self):
        prompt = "Hello world"
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        if text_generator.model.framework == "tf":
            logger = logging.get_logger("transformers.generation.tf_utils")
        else:
            logger = logging.get_logger("transformers.generation.utils")
        logger_msg = "Both `max_new_tokens`"  # The beginning of the message to be checked in this test

        # Both are set by the user -> log warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10, max_new_tokens=1)
        self.assertIn(logger_msg, cl.out)

        # The user only sets one -> no warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_new_tokens=1)
        self.assertNotIn(logger_msg, cl.out)

        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10)
        self.assertNotIn(logger_msg, cl.out)
| 171
|
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **rouge_kwargs):
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **rouge_kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely


if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
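Typical invocation of the Fire entry point above (file names are illustrative):

# python rouge_cli.py predicted_summaries.txt gold_summaries.txt --save_path metrics.json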
| 171
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {'tokenization_herbert': ['HerbertTokenizer']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_herbert_fast'] = ['HerbertTokenizerFast']
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 296
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None
    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'apply_ocr'))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'height': 18, 'width': 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {'height': 42, 'width': 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors='pt')
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
    def test_LayoutLMv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMvaImageProcessor()

        from datasets import load_dataset

        ds = load_dataset('hf-internal-testing/fixtures_docvqa', split='test')

        image = Image.open(ds[0]['file']).convert('RGB')

        encoding = image_processing(image, return_tensors='pt')

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
SCREAMING_SNAKE_CASE__ : Any =[['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
SCREAMING_SNAKE_CASE__ : Optional[Any] =[[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 
6_38, 6_15], [6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __lowercase )
self.assertListEqual(encoding.boxes , __lowercase )
# with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False)
        encoding = image_processing(image, return_tensors='pt')
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
| 296
| 1
|
"""simple docstring"""
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
    warnings = None
try:
import msvcrt
except ImportError:
    msvcrt = None
try:
import fcntl
except ImportError:
    fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
"""Timeout""",
"""BaseFileLock""",
"""WindowsFileLock""",
"""UnixFileLock""",
"""SoftFileLock""",
"""FileLock""",
]
A__ : Union[str, Any]= """3.0.12"""
_logger = None
def logger():
    """Returns the logger instance used throughout the module."""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout(TimeoutError):
    def __init__(self, lock_file):
        #: The path of the file lock.
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp
class _Acquire_ReturnProxy(object):
    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock(object):
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)

        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self.timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None
    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None
    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)
    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None
    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None
    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + '...' + hashed_filename + '.lock'
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        # Do not remove the lockfile:
        #
        #   https://github.com/benediktschmitt/py-filelock/issues/31
        #   https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(BaseFileLock):
    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None

if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
| 20
|
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
A__ : Optional[Any]= """Run commands across TPU VMs for initial setup before running `accelerate launch`."""
def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file",
        type=str,
        default=None,
        help="Path to the config file to use for accelerate.",
    )
    config_args.add_argument(
        "--tpu_name",
        default=None,
        help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
    )
    config_args.add_argument(
        "--tpu_zone",
        default=None,
        help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha",
        action="store_true",
        help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
    )
    pod_args.add_argument(
        "--command_file",
        default=None,
        help="The path to the file containing the commands to run on the pod on startup.",
    )
    pod_args.add_argument(
        "--command",
        action="append",
        nargs="+",
        help="A command to run on the pod. Can be passed multiple times.",
    )
    pod_args.add_argument(
        "--install_accelerate",
        action="store_true",
        help="Whether to install accelerate on the pod. Defaults to False.",
    )
    pod_args.add_argument(
        "--accelerate_version",
        default="latest",
        help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
    )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
    )

    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
UpperCamelCase__ = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
UpperCamelCase__ = defaults.command_file
if not args.command and defaults.commands is not None:
UpperCamelCase__ = defaults.commands
if not args.tpu_name:
UpperCamelCase__ = defaults.tpu_name
if not args.tpu_zone:
UpperCamelCase__ = defaults.tpu_zone
if args.accelerate_version == "dev":
UpperCamelCase__ = 'git+https://github.com/huggingface/accelerate.git'
elif args.accelerate_version == "latest":
UpperCamelCase__ = 'accelerate -U'
elif isinstance(parse(args.accelerate_version ) , SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = F'accelerate=={args.accelerate_version}'
if not args.command_file and not args.command:
raise ValueError('You must specify either a command file or a command to run on the pod.' )
if args.command_file:
with open(args.command_file , 'r' ) as f:
UpperCamelCase__ = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
UpperCamelCase__ = ['cd /usr/share']
if args.install_accelerate:
new_cmd += [F'pip install {args.accelerate_version}']
new_cmd += args.command
UpperCamelCase__ = '; '.join(SCREAMING_SNAKE_CASE )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
UpperCamelCase__ = ['gcloud']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(F'Running {" ".join(SCREAMING_SNAKE_CASE )}' )
return
subprocess.run(SCREAMING_SNAKE_CASE )
print('Successfully setup pod.' )
def main():
    """simple docstring"""
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
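# Example invocation (hypothetical TPU name/zone; the flags match the parser above):
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#       --command "python train.py" --install_accelerate --debug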
| 20
| 1
|
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1000):
    # Miller-Rabin probabilistic primality test: run `prec` rounds with random
    # bases; a composite survives each round with probability at most 1/4.
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division keeps d an int for bin_exp_mod
        exp += 1
    # n - 1 = d*(2**exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
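# Quick sanity checks: is_prime_big(97) -> True; is_prime_big(561) -> False
# (561 is a Carmichael number that fools the plain Fermat test, but not this
# strong probable-prime test).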
if __name__ == "__main__":
    n = abs(int(input('Enter bound : ').strip()))
    print('Here\'s the list of primes:')
    print(', '.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 194
|
import inspect
import unittest
class lowerCAmelCase ( unittest.TestCase ):
def UpperCAmelCase ( self :int ):
'''simple docstring'''
try:
import diffusers # noqa: F401
except ImportError:
assert False
def UpperCAmelCase ( self :Optional[Any] ):
'''simple docstring'''
import diffusers
from diffusers.dependency_versions_table import deps
        all_classes = inspect.getmembers(diffusers, inspect.isclass)
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f'''{backend} is not in the deps table!'''
| 655
| 0
|
def partition(m: int):
    '''simple docstring'''
    # Dynamic-programming table; filled so that memo[m][m - 1] equals the
    # integer partition number p(m).
    memo = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1
    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]
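# Hand-checked examples: partition(3) == 3 and partition(4) == 5, matching the
# integer partition numbers p(3) and p(4).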
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
            n = int(input("""Enter a number: """).strip())
print(partition(n))
except ValueError:
print("""Please enter a number.""")
else:
try:
            n = int(sys.argv[1])
print(partition(n))
except ValueError:
print("""Please pass a number.""")
| 601
|
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    '''simple docstring'''
    assert torch_layer.weight.shape == weight.shape, f'{torch_layer} layer.weight does not match'
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f'{torch_layer} layer.bias does not match'
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    '''simple docstring'''
    # LSH attention stores a combined query_key projection plus value and output dense.
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])
    set_param(
        torch_layer.self_attention.query_key, torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense, torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    '''simple docstring'''
    # Local attention stores separate query, key, value projections plus output dense.
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])
    set_param(
        torch_layer.self_attention.query, torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key, torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense, torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    '''simple docstring'''
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm, torch.tensor(layer_norm_1_weight), torch.tensor(layer_norm_1_bias),
    )
    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)
    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]
    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]
    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm, torch.tensor(layer_norm_2_weight), torch.tensor(layer_norm_2_bias),
    )
    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense, torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(), torch.tensor(inter_dense_bias),
    )
    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense, torch.tensor(out_dense_weight).transpose(0, 1).contiguous(), torch.tensor(out_dense_bias),
    )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    '''simple docstring'''
    # reformer model
    torch_model_reformer = torch_model.reformer
    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings, torch.tensor(word_embeddings),
    )
    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f'{position_embeddings[emb_idx]} emb does not match'
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))
    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)
    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm, torch.tensor(layer_norm_out_weight), torch.tensor(layer_norm_out_bias),
    )
    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder, torch.tensor(output_embed_weights).transpose(0, 1).contiguous(), torch.tensor(output_embed_bias),
    )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    '''simple docstring'''
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = ReformerModelWithLMHead(config)
    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]
    set_model_weights_in_torch(model_weights, model, config.hidden_size)
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)
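# Example invocation (file names are placeholders):
#   python convert_reformer_trax_checkpoint_to_pytorch.py \
#       --trax_model_pkl_path model.pkl --config_file config.json \
#       --pytorch_dump_path pytorch_model.bin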
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--trax_model_pkl_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained Reformer model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 601
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'''
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class __A( PretrainedConfig ):
    model_type = '''speech_to_text'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : str , __UpperCamelCase : Tuple=1_0_0_0_0 , __UpperCamelCase : Union[str, Any]=1_2 , __UpperCamelCase : str=2_0_4_8 , __UpperCamelCase : Tuple=4 , __UpperCamelCase : int=6 , __UpperCamelCase : Dict=2_0_4_8 , __UpperCamelCase : Any=4 , __UpperCamelCase : Any=0.0 , __UpperCamelCase : Optional[int]=0.0 , __UpperCamelCase : Tuple=True , __UpperCamelCase : List[Any]=True , __UpperCamelCase : Union[str, Any]="relu" , __UpperCamelCase : Optional[int]=2_5_6 , __UpperCamelCase : str=0.1 , __UpperCamelCase : List[Any]=0.0 , __UpperCamelCase : Tuple=0.0 , __UpperCamelCase : Union[str, Any]=0.02 , __UpperCamelCase : str=2 , __UpperCamelCase : Dict=True , __UpperCamelCase : List[Any]=1 , __UpperCamelCase : List[str]=0 , __UpperCamelCase : Union[str, Any]=2 , __UpperCamelCase : Optional[Any]=6_0_0_0 , __UpperCamelCase : List[Any]=1_0_2_4 , __UpperCamelCase : Optional[int]=2 , __UpperCamelCase : int=(5, 5) , __UpperCamelCase : Any=1_0_2_4 , __UpperCamelCase : Any=8_0 , __UpperCamelCase : Dict=1 , **__UpperCamelCase : Optional[int] , ):
lowerCamelCase_ = vocab_size
lowerCamelCase_ = d_model
lowerCamelCase_ = encoder_ffn_dim
lowerCamelCase_ = encoder_layers
lowerCamelCase_ = encoder_attention_heads
lowerCamelCase_ = decoder_ffn_dim
lowerCamelCase_ = decoder_layers
lowerCamelCase_ = decoder_attention_heads
lowerCamelCase_ = dropout
lowerCamelCase_ = attention_dropout
lowerCamelCase_ = activation_dropout
lowerCamelCase_ = activation_function
lowerCamelCase_ = init_std
lowerCamelCase_ = encoder_layerdrop
lowerCamelCase_ = decoder_layerdrop
lowerCamelCase_ = use_cache
lowerCamelCase_ = encoder_layers
lowerCamelCase_ = scale_embedding # scale factor will be sqrt(d_model) if True
lowerCamelCase_ = max_source_positions
lowerCamelCase_ = max_target_positions
lowerCamelCase_ = num_conv_layers
lowerCamelCase_ = list(__UpperCamelCase )
lowerCamelCase_ = conv_channels
lowerCamelCase_ = input_feat_per_channel
lowerCamelCase_ = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` """
F'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, '''
F'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
super().__init__(
pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , is_encoder_decoder=__UpperCamelCase , decoder_start_token_id=__UpperCamelCase , **__UpperCamelCase , )
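    # Minimal usage sketch (the class is named __A in this snippet; upstream it is
    # Speech2TextConfig):
    #   config = __A(vocab_size=10000, d_model=256)
    #   config.hidden_size  # -> 256, resolved through attribute_map above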
| 272
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_layoutlmv2''': ['''LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LayoutLMv2Config'''],
'''processing_layoutlmv2''': ['''LayoutLMv2Processor'''],
'''tokenization_layoutlmv2''': ['''LayoutLMv2Tokenizer'''],
}
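# The try/except blocks below extend _import_structure only when the optional
# backend (tokenizers, vision, torch) is importable; at the bottom, the module is
# swapped for a _LazyModule so the heavy submodules load on first attribute access.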
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_layoutlmv2_fast'''] = ['''LayoutLMv2TokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_layoutlmv2'''] = ['''LayoutLMv2FeatureExtractor''']
    _import_structure['''image_processing_layoutlmv2'''] = ['''LayoutLMv2ImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_layoutlmv2'''] = [
'''LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LayoutLMv2ForQuestionAnswering''',
'''LayoutLMv2ForSequenceClassification''',
'''LayoutLMv2ForTokenClassification''',
'''LayoutLMv2Layer''',
'''LayoutLMv2Model''',
'''LayoutLMv2PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 272
| 1
|
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_CITATION = """\
@inproceedings{popovic-2015-chrf,
title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",
month = sep,
year = \"2015\",
address = \"Lisbon, Portugal\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W15-3049\",
doi = \"10.18653/v1/W15-3049\",
pages = \"392--395\",
}
@inproceedings{popovic-2017-chrf,
title = \"chr{F}++: words helping character n-grams\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Second Conference on Machine Translation\",
month = sep,
year = \"2017\",
address = \"Copenhagen, Denmark\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W17-4770\",
doi = \"10.18653/v1/W17-4770\",
pages = \"612--618\",
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
_DESCRIPTION = """\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
"""
_KWARGS_DESCRIPTION = """
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
'score' (float): The chrF (chrF++) score,
'char_order' (int): The character n-gram order,
'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
'beta' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
'''simple docstring'''
    def _info(self):
'''simple docstring'''
if version.parse(scb.__version__) < version.parse("1.4.12"):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`.")
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence"),
"references": datasets.Sequence(datasets.Value("string" , id="sequence") , id="references"),
}) , codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"] , reference_urls=[
"https://github.com/m-popovic/chrF",
] , )
    def _compute(self, predictions, references, char_order=CHRF.CHAR_ORDER, word_order=CHRF.WORD_ORDER, beta=CHRF.BETA, lowercase=False, whitespace=False, eps_smoothing=False):
        '''simple docstring'''
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)
        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
| 120
|
"""simple docstring"""
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPT2Config, T5Config, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
        TFGPT2LMHeadModel,
TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    '''simple docstring'''
    UpperCamelCase__ = """new-model"""
    model_type = """new-model"""

if is_tf_available():
    class TFNewModel(TFBertModel):
        '''simple docstring'''
        config_class = NewModelConfig
@require_tf
class _a ( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)
        model = TFAutoModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertModel)
@slow
    def test_model_for_pretraining_from_pretrained(self):
        '''simple docstring'''
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)
        model = TFAutoModelForPreTraining.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertForPreTraining)
@slow
    def test_model_for_causal_lm(self):
        '''simple docstring'''
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)
            model = TFAutoModelForCausalLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)
@slow
    def test_lmhead_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = TFAutoModelWithLMHead.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)
@slow
    def test_model_for_masked_lm(self):
        '''simple docstring'''
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = TFAutoModelForMaskedLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)
@slow
    def test_model_for_encoder_decoder_lm(self):
        '''simple docstring'''
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)
            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)
@slow
    def test_sequence_classification_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = TFAutoModelForSequenceClassification.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)
@slow
    def test_question_answering_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)
@slow
@require_tensorflow_probability
    def test_table_question_answering_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, TapasConfig)
            model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_name)
            model, loading_info = TFAutoModelForTableQuestionAnswering.from_pretrained(
                model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFTapasForQuestionAnswering)
    def test_from_pretrained_identifier(self):
        '''simple docstring'''
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)
    def test_from_identifier_from_model_type(self):
        '''simple docstring'''
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)
    def test_from_pretrained_with_tuple_values(self):
        '''simple docstring'''
        # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
        model = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny")
        self.assertIsInstance(model, TFFunnelModel)
        config = copy.deepcopy(model.config)
        config.architectures = ["FunnelBaseModel"]
        model = TFAutoModel.from_config(config)
        self.assertIsInstance(model, TFFunnelBaseModel)
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir)
            model = TFAutoModel.from_pretrained(tmp_dir)
            self.assertIsInstance(model, TFFunnelBaseModel)
    def test_new_model_registration(self):
        '''simple docstring'''
        try:
            AutoConfig.register("new-model", NewModelConfig)
            auto_classes = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]
            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__):
                    # Wrong config class will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFNewModel)
                    auto_class.register(NewModelConfig, TFNewModel)
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFBertModel)
                    # Now that the config is registered, it can be used as any other config with the auto-API
                    tiny_config = BertModelTester(self).get_config()
                    config = NewModelConfig(**tiny_config.to_dict())
                    model = auto_class.from_config(config)
                    self.assertIsInstance(model, TFNewModel)
                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(tmp_dir)
                        new_model = auto_class.from_pretrained(tmp_dir)
                        self.assertIsInstance(new_model, TFNewModel)
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
            for mapping in (
                TF_MODEL_MAPPING,
                TF_MODEL_FOR_PRETRAINING_MAPPING,
                TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
                TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_CAUSAL_LM_MAPPING,
                TF_MODEL_FOR_MASKED_LM_MAPPING,
            ):
                if NewModelConfig in mapping._extra_content:
                    del mapping._extra_content[NewModelConfig]
    def test_repo_not_found(self):
        '''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"):
            model = TFAutoModel.from_pretrained("bert-base")
    def test_revision_not_found(self):
        '''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError, R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"):
            model = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")
    def test_model_file_not_found(self):
        '''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError, "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin", ):
            model = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model")
    def test_model_from_pt_suggestion(self):
        '''simple docstring'''
        with self.assertRaisesRegex(OSError, "Use `from_pt=True` to load this model"):
            model = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
    def test_cached_model_has_minimum_calls_to_head(self):
        '''simple docstring'''
        # Make sure we have cached the model.
        model = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            model = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(counter.get_request_count, 0)
        self.assertEqual(counter.head_request_count, 1)
        self.assertEqual(counter.other_request_count, 0)
        # With a sharded checkpoint
        model = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
        with RequestCounter() as counter:
            model = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
        self.assertEqual(counter.get_request_count, 0)
        self.assertEqual(counter.head_request_count, 1)
        self.assertEqual(counter.other_request_count, 0)
| 120
| 1
|
"""simple docstring"""
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
    PIL_INTERPOLATION = {
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
    PIL_INTERPOLATION = {
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
def pt_to_pil(images):
    # denormalize a torch tensor batch from [-1, 1] to [0, 1] and convert to PIL
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images
def numpy_to_pil(images):
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("""uint8""")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="""L""") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
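# Usage sketch: for a torch tensor batch `x` in [-1, 1] with shape (B, C, H, W),
# pt_to_pil(x) denormalizes it and returns a list of PIL images (one per batch item).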
| 200
|
"""simple docstring"""
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type: str,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    if config_name_or_path is None:
        config_name_or_path = """facebook/rag-token-base""" if model_type == """rag_token""" else """facebook/rag-sequence-base"""
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path
    model_class = RagTokenForGeneration if model_type == """rag_token""" else RagSequenceForGeneration
    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config)
    rag_model.save_pretrained(dest_dir)
    # Sanity check.
    model_class.from_pretrained(dest_dir)
    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / """generator_tokenizer/""")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / """question_encoder_tokenizer/""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_type',
choices=['rag_sequence', 'rag_token'],
required=True,
type=str,
help='RAG model type: rag_sequence, rag_token',
)
parser.add_argument('--dest', type=str, required=True, help='Path to the output checkpoint directory.')
parser.add_argument('--generator_name_or_path', type=str, required=True, help='Generator model identifier')
parser.add_argument(
'--question_encoder_name_or_path', type=str, required=True, help='Question encoder model identifier'
)
parser.add_argument(
'--generator_tokenizer_name_or_path',
type=str,
help='Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``',
)
parser.add_argument(
'--question_encoder_tokenizer_name_or_path',
type=str,
help='Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``',
)
parser.add_argument(
'--config_name_or_path',
type=str,
help=(
'Identifier of the model config to use, if not provided, resolves to a base config for a given'
' ``model_type``'
),
)
    args = parser.parse_args()
    dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
| 200
| 1
|
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8
KEYMAP = {
'tab': ord('\t'),
'newline': ord('\r'),
'esc': 27,
'up': 65 + ARROW_KEY_FLAG,
'down': 66 + ARROW_KEY_FLAG,
'right': 67 + ARROW_KEY_FLAG,
'left': 68 + ARROW_KEY_FLAG,
'mod_int': 91,
'undefined': sys.maxsize,
'interrupt': 3,
'insert': 50,
'delete': 51,
'pg_up': 53,
'pg_down': 54,
}
KEYMAP['arrow_begin'] = KEYMAP['up']
KEYMAP['arrow_end'] = KEYMAP['left']
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
b'\xe0H': KEYMAP['up'] - ARROW_KEY_FLAG,
b'\x00H': KEYMAP['up'] - ARROW_KEY_FLAG,
b'\xe0P': KEYMAP['down'] - ARROW_KEY_FLAG,
b'\x00P': KEYMAP['down'] - ARROW_KEY_FLAG,
b'\xe0M': KEYMAP['right'] - ARROW_KEY_FLAG,
b'\x00M': KEYMAP['right'] - ARROW_KEY_FLAG,
b'\xe0K': KEYMAP['left'] - ARROW_KEY_FLAG,
b'\x00K': KEYMAP['left'] - ARROW_KEY_FLAG,
}
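    # Windows delivers arrow/navigation keys as two-byte sequences prefixed with
    # b"\x00" or b"\xe0"; the table above maps those sequences onto the shared
    # KEYMAP codes (minus ARROW_KEY_FLAG, which get_character adds back).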
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    # Gets raw characters from input
    if os.name == "nt":
        import msvcrt
        encoding = '''mbcs'''
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP['''mod_int''']))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(1_2_6))
                    ch = chr(KEYMAP['''esc'''])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    # Gets a character from the keyboard and returns the key code
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 641
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class lowerCAmelCase__( PretrainedConfig ):
'''simple docstring'''
    model_type = 'encoder-decoder'
    is_composition = True
    def __init__(self, **kwargs):
        '''simple docstring'''
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop('''encoder''')
        encoder_model_type = encoder_config.pop('''model_type''')
        decoder_config = kwargs.pop('''decoder''')
        decoder_model_type = decoder_config.pop('''model_type''')
        from ..auto.configuration_auto import AutoConfig
        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True
    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs):
        '''simple docstring'''
        logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''')
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)
    def to_dict(self):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        output['''encoder'''] = self.encoder.to_dict()
        output['''decoder'''] = self.decoder.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
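    # Usage sketch (the class is named lowerCAmelCase__ in this snippet; upstream
    # it is EncoderDecoderConfig). Assuming two PretrainedConfig instances:
    #   config = lowerCAmelCase__.from_encoder_decoder_configs(enc_cfg, dec_cfg)
    #   config.to_dict()["model_type"]  # -> "encoder-decoder"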
| 641
| 1
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
class a_ :
    def __init__(self, num_of_nodes: int):
        self.m_num_of_nodes = num_of_nodes
        self.m_edges = []
        self.m_component = {}

    def add_edge(self, u_node: int, v_node: int, weight: int):
        self.m_edges.append([u_node, v_node, weight])
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : int ):
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : int ):
if self.m_component[u_node] != u_node:
for k in self.m_component:
lowerCAmelCase__ = self.find_component(snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : list[int] , snake_case__ : int , snake_case__ : int ):
if component_size[u_node] <= component_size[v_node]:
lowerCAmelCase__ = v_node
component_size[v_node] += component_size[u_node]
self.set_component(snake_case__ )
elif component_size[u_node] >= component_size[v_node]:
lowerCAmelCase__ = self.find_component(snake_case__ )
component_size[u_node] += component_size[v_node]
self.set_component(snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : str ):
lowerCAmelCase__ = []
lowerCAmelCase__ = 0
lowerCAmelCase__ = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
lowerCAmelCase__ = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = edge
lowerCAmelCase__ = self.m_component[u]
lowerCAmelCase__ = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
lowerCAmelCase__ = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(snake_case__ , snake_case__ ):
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = edge
lowerCAmelCase__ = self.m_component[u]
lowerCAmelCase__ = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(snake_case__ , snake_case__ , snake_case__ )
print(F"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" )
num_of_components -= 1
lowerCAmelCase__ = [-1] * self.m_num_of_nodes
print(F"""The total weight of the minimal spanning tree is: {mst_weight}""" )
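    # Usage sketch (the class is named a_ in this snippet):
    #   g = a_(3)
    #   g.add_edge(0, 1, 5); g.add_edge(1, 2, 10); g.add_edge(0, 2, 1)
    #   g.boruvka()  # prints the chosen edges and a total MST weight of 6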
def _UpperCAmelCase ( ):
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 644
|
"""simple docstring"""
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_CITATION = "\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
_DESCRIPTION = "\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n"
_KWARGS_DESCRIPTION = "\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n"
def normalize_answer(text):
    """simple docstring"""
    def remove_articles(text):
        regex = re.compile(r"""\b(a|an|the)\b""", re.UNICODE)
        return re.sub(regex, """ """, text)
    def white_space_fix(text):
        return " ".join(text.split())
    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)
    def lower(text):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(text))))
def compute_exact(a_gold, a_pred):
    """simple docstring"""
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_em(predictions, references):
    """simple docstring"""
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
def SARIngram(sgrams, cgrams, rgramslist, numref):
    """simple docstring"""
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)
    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref
    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref
    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter
    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)
    # ADDITION
    addgramcounter = set(cgrams) - set(sgrams)
    addgramcountergood = set(addgramcounter) & set(rgramsall)
    addgramcounterall = set(rgramsall) - set(sgrams)
    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    """simple docstring"""
    numref = len(rsents)
    s1grams = ssent.split(""" """)
    c1grams = csent.split(""" """)
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(""" """)
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + """ """ + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + """ """ + r1grams[i + 1] + """ """ + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + """ """ + r1grams[i + 1] + """ """ + r1grams[i + 2] + """ """ + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)
    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + """ """ + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + """ """ + s1grams[i + 1] + """ """ + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + """ """ + s1grams[i + 1] + """ """ + s1grams[i + 2] + """ """ + s1grams[i + 3]
            s4grams.append(s4gram)
    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + """ """ + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + """ """ + c1grams[i + 1] + """ """ + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + """ """ + c1grams[i + 1] + """ """ + c1grams[i + 2] + """ """ + c1grams[i + 3]
            c4grams.append(c4gram)
    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence, lowercase=True, tokenizer="13a", return_str=True):
    """simple docstring"""
    if lowercase:
        sentence = sentence.lower()
    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence
    if not return_str:
        normalized_sent = normalized_sent.split()
    return normalized_sent
def compute_sari(sources, predictions, references):
    """simple docstring"""
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("""Sources length must match predictions and references lengths.""")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
def compute_sacrebleu(predictions, references, smooth_method="exp", smooth_value=None, force=False, lowercase=False, use_effective_order=False):
    """simple docstring"""
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("""Sacrebleu requires the same number of references for each prediction""")
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions, transformed_references, smooth_method=smooth_method, smooth_value=smooth_value, force=force, lowercase=lowercase, use_effective_order=use_effective_order, )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=[
"""https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py""",
"""https://github.com/cocoxu/simplification/blob/master/SARI.py""",
"""https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py""",
"""https://github.com/mjpost/sacreBLEU""",
] , reference_urls=[
"""https://www.aclweb.org/anthology/Q16-1029.pdf""",
"""https://github.com/mjpost/sacreBLEU""",
"""https://en.wikipedia.org/wiki/BLEU""",
"""https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""",
] , )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : Dict , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] ):
lowerCAmelCase__ = {}
result.update({"""sari""": compute_sari(sources=snake_case__ , predictions=snake_case__ , references=snake_case__ )} )
result.update({"""sacrebleu""": compute_sacrebleu(predictions=snake_case__ , references=snake_case__ )} )
result.update({"""exact""": compute_em(predictions=snake_case__ , references=snake_case__ )} )
return result
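# Hedged usage sketch for the helpers wired into the metric above (inputs are
# illustrative; the numeric scores are not verified outputs):
#
#   sources = ["About 95 species are currently accepted ."]
#   predictions = ["About 95 you now get in ."]
#   references = [["About 95 species are currently known ."]]
#   sari = compute_sari(sources=sources, predictions=predictions, references=references)
#   bleu = compute_sacrebleu(predictions=predictions, references=references)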
| 644
| 1
|
"""simple docstring"""
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
A: int = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
UpperCAmelCase : str = question_encoder
UpperCAmelCase : Tuple = generator
UpperCAmelCase : Dict = self.question_encoder
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
if os.path.isfile(_SCREAMING_SNAKE_CASE ):
raise ValueError(F"Provided path ({save_directory}) should be a directory, not a file" )
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[int] = os.path.join(_SCREAMING_SNAKE_CASE , """question_encoder_tokenizer""" )
UpperCAmelCase : Optional[Any] = os.path.join(_SCREAMING_SNAKE_CASE , """generator_tokenizer""" )
self.question_encoder.save_pretrained(_SCREAMING_SNAKE_CASE )
self.generator.save_pretrained(_SCREAMING_SNAKE_CASE )
@classmethod
def SCREAMING_SNAKE_CASE ( cls , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
from ..auto.tokenization_auto import AutoTokenizer
UpperCAmelCase : Optional[Any] = kwargs.pop("""config""" , _SCREAMING_SNAKE_CASE )
if config is None:
UpperCAmelCase : Tuple = RagConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Any = AutoTokenizer.from_pretrained(
_SCREAMING_SNAKE_CASE , config=config.question_encoder , subfolder="""question_encoder_tokenizer""" )
UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained(
_SCREAMING_SNAKE_CASE , config=config.generator , subfolder="""generator_tokenizer""" )
return cls(question_encoder=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE )
def __call__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
return self.current_tokenizer(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
return self.generator.batch_decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
'''simple docstring'''
return self.generator.decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : Any = self.question_encoder
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : Optional[Any] = self.generator
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "longest" , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = True , **_SCREAMING_SNAKE_CASE , ) -> BatchEncoding:
'''simple docstring'''
warnings.warn(
"""`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the """
"""regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` """
"""context manager to prepare your targets. See the documentation of your specific tokenizer for more """
"""details""" , _SCREAMING_SNAKE_CASE , )
if max_length is None:
UpperCAmelCase : Tuple = self.current_tokenizer.model_max_length
UpperCAmelCase : Any = self(
_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
UpperCAmelCase : Optional[int] = self.current_tokenizer.model_max_length
UpperCAmelCase : str = self(
text_target=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
UpperCAmelCase : Any = labels["""input_ids"""]
return model_inputs
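# Hedged usage sketch (the class above corresponds to RagTokenizer in the original
# transformers source; the checkpoint name is illustrative):
#
#   tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
#   inputs = tokenizer("who holds the record in 100m freestyle", return_tensors="pt")
#   decoded = tokenizer.batch_decode(inputs["input_ids"])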
| 713
|
"""simple docstring"""
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
A: List[Any] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
__lowerCAmelCase : Optional[int] = 'AutoTokenizer'
__lowerCAmelCase : str = ['tokenizer']
__lowerCAmelCase : Any = {
'semantic_prompt': 1,
'coarse_prompt': 2,
'fine_prompt': 2,
}
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> int:
'''simple docstring'''
super().__init__(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[int] = speaker_embeddings
@classmethod
def SCREAMING_SNAKE_CASE ( cls , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="speaker_embeddings_path.json" , **_SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
if speaker_embeddings_dict_path is not None:
UpperCAmelCase : Any = get_file_from_repo(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , subfolder=kwargs.pop("""subfolder""" , _SCREAMING_SNAKE_CASE ) , cache_dir=kwargs.pop("""cache_dir""" , _SCREAMING_SNAKE_CASE ) , force_download=kwargs.pop("""force_download""" , _SCREAMING_SNAKE_CASE ) , proxies=kwargs.pop("""proxies""" , _SCREAMING_SNAKE_CASE ) , resume_download=kwargs.pop("""resume_download""" , _SCREAMING_SNAKE_CASE ) , local_files_only=kwargs.pop("""local_files_only""" , _SCREAMING_SNAKE_CASE ) , use_auth_token=kwargs.pop("""use_auth_token""" , _SCREAMING_SNAKE_CASE ) , revision=kwargs.pop("""revision""" , _SCREAMING_SNAKE_CASE ) , )
if speaker_embeddings_path is None:
logger.warning(
F"`{os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`." )
UpperCAmelCase : Optional[int] = None
else:
with open(_SCREAMING_SNAKE_CASE ) as speaker_embeddings_json:
UpperCAmelCase : List[str] = json.load(_SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase : List[str] = None
UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
return cls(tokenizer=_SCREAMING_SNAKE_CASE , speaker_embeddings=_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="speaker_embeddings_path.json" , _SCREAMING_SNAKE_CASE="speaker_embeddings" , _SCREAMING_SNAKE_CASE = False , **_SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
'''simple docstring'''
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """v2""" ) , exist_ok=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : str = {}
UpperCAmelCase : Union[str, Any] = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
UpperCAmelCase : Optional[Any] = self._load_voice_preset(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Any = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["""repo_or_path"""] , _SCREAMING_SNAKE_CASE , F"{prompt_key}_{key}" ) , voice_preset[key] , allow_pickle=_SCREAMING_SNAKE_CASE , )
UpperCAmelCase : Optional[int] = os.path.join(_SCREAMING_SNAKE_CASE , F"{prompt_key}_{key}.npy" )
UpperCAmelCase : Tuple = tmp_dict
with open(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , """w""" ) as fp:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
super().save_pretrained(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
UpperCAmelCase : Optional[int] = self.speaker_embeddings[voice_preset]
UpperCAmelCase : List[Any] = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]." )
UpperCAmelCase : List[str] = get_file_from_repo(
self.speaker_embeddings.get("""repo_or_path""" , """/""" ) , voice_preset_paths[key] , subfolder=kwargs.pop("""subfolder""" , _SCREAMING_SNAKE_CASE ) , cache_dir=kwargs.pop("""cache_dir""" , _SCREAMING_SNAKE_CASE ) , force_download=kwargs.pop("""force_download""" , _SCREAMING_SNAKE_CASE ) , proxies=kwargs.pop("""proxies""" , _SCREAMING_SNAKE_CASE ) , resume_download=kwargs.pop("""resume_download""" , _SCREAMING_SNAKE_CASE ) , local_files_only=kwargs.pop("""local_files_only""" , _SCREAMING_SNAKE_CASE ) , use_auth_token=kwargs.pop("""use_auth_token""" , _SCREAMING_SNAKE_CASE ) , revision=kwargs.pop("""revision""" , _SCREAMING_SNAKE_CASE ) , )
if path is None:
raise ValueError(
F"`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings." )
UpperCAmelCase : List[str] = np.load(_SCREAMING_SNAKE_CASE )
return voice_preset_dict
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE = None ) -> List[str]:
'''simple docstring'''
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F"Voice preset unrecognized, missing {key} as a key." )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(F"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." )
def __call__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="pt" , _SCREAMING_SNAKE_CASE=256 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , **_SCREAMING_SNAKE_CASE , ) -> Optional[int]:
'''simple docstring'''
if voice_preset is not None and not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if (
isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
UpperCAmelCase : Dict = self._load_voice_preset(_SCREAMING_SNAKE_CASE )
else:
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and not voice_preset.endswith(""".npz""" ):
UpperCAmelCase : Tuple = voice_preset + """.npz"""
UpperCAmelCase : Union[str, Any] = np.load(_SCREAMING_SNAKE_CASE )
if voice_preset is not None:
self._validate_voice_preset_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[Any] = BatchFeature(data=_SCREAMING_SNAKE_CASE , tensor_type=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[str] = self.tokenizer(
_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , padding="""max_length""" , max_length=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , return_token_type_ids=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
if voice_preset is not None:
UpperCAmelCase : List[Any] = voice_preset
return encoded_text
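# Hedged usage sketch (the class above corresponds to BarkProcessor in the original
# transformers source; checkpoint and voice preset names are illustrative):
#
#   processor = BarkProcessor.from_pretrained("suno/bark-small")
#   inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")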
| 359
| 0
|
def factorial(num):
    # Iteratively compute num!
    fact = 1
    for i in range(1 , num + 1):
        fact *= i
    return fact


def split_and_add(number):
    # Sum the decimal digits of `number`.
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num = 100):
    # Project Euler #20: sum of the digits of num!
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result


if __name__ == "__main__":
    print(solution(int(input("""Enter the Number: """).strip())))
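# Worked example: factorial(10) == 3628800, and 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27,
# so solution(10) == 27.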
| 23
|
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int]=13 , _UpperCAmelCase : Optional[Any]=7 , _UpperCAmelCase : int=True , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : Any=True , _UpperCAmelCase : Optional[Any]=99 , _UpperCAmelCase : Tuple=32 , _UpperCAmelCase : Optional[int]=5 , _UpperCAmelCase : Optional[int]=4 , _UpperCAmelCase : Dict=37 , _UpperCAmelCase : str="gelu" , _UpperCAmelCase : List[Any]=0.1 , _UpperCAmelCase : Tuple=0.1 , _UpperCAmelCase : Union[str, Any]=1_28 , _UpperCAmelCase : List[str]=32 , _UpperCAmelCase : List[Any]=16 , _UpperCAmelCase : Dict=2 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : Optional[Any]=3 , _UpperCAmelCase : int=4 , _UpperCAmelCase : List[Any]=None , ):
"""simple docstring"""
UpperCAmelCase__ = parent
UpperCAmelCase__ = batch_size
UpperCAmelCase__ = seq_length
UpperCAmelCase__ = is_training
UpperCAmelCase__ = use_input_mask
UpperCAmelCase__ = use_token_type_ids
UpperCAmelCase__ = use_labels
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = type_vocab_size
UpperCAmelCase__ = type_sequence_label_size
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = num_labels
UpperCAmelCase__ = num_choices
UpperCAmelCase__ = scope
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ = None
if self.use_input_mask:
UpperCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ = None
if self.use_token_type_ids:
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = None
if self.use_labels:
UpperCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
return NezhaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
        ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) = self.prepare_config_and_inputs()
UpperCAmelCase__ = True
UpperCAmelCase__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def SCREAMING_SNAKE_CASE__ ( self : str , _UpperCAmelCase : Any , _UpperCAmelCase : int , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] ):
"""simple docstring"""
UpperCAmelCase__ = NezhaModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase__ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
UpperCAmelCase__ = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
UpperCAmelCase__ = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , ):
"""simple docstring"""
UpperCAmelCase__ = True
UpperCAmelCase__ = NezhaModel(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase__ = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , )
UpperCAmelCase__ = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , )
UpperCAmelCase__ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self : Any , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] ):
"""simple docstring"""
UpperCAmelCase__ = NezhaForMaskedLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase__ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] ):
"""simple docstring"""
UpperCAmelCase__ = NezhaForNextSentencePrediction(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase__ = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] ):
"""simple docstring"""
UpperCAmelCase__ = NezhaForPreTraining(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase__ = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , next_sentence_label=_UpperCAmelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] ):
"""simple docstring"""
UpperCAmelCase__ = NezhaForQuestionAnswering(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase__ = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any ):
"""simple docstring"""
UpperCAmelCase__ = self.num_labels
UpperCAmelCase__ = NezhaForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase__ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : str , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : str , _UpperCAmelCase : List[Any] ):
"""simple docstring"""
UpperCAmelCase__ = self.num_labels
UpperCAmelCase__ = NezhaForTokenClassification(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase__ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : int ):
"""simple docstring"""
UpperCAmelCase__ = self.num_choices
UpperCAmelCase__ = NezhaForMultipleChoice(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase__ = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
UpperCAmelCase__ = self.prepare_config_and_inputs()
        ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) = config_and_inputs
UpperCAmelCase__ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : Any = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCAmelCase_ : Tuple = (
{
"""feature-extraction""": NezhaModel,
"""fill-mask""": NezhaForMaskedLM,
"""question-answering""": NezhaForQuestionAnswering,
"""text-classification""": NezhaForSequenceClassification,
"""token-classification""": NezhaForTokenClassification,
"""zero-shot""": NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase_ : Union[str, Any] = True
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[int]=False ):
"""simple docstring"""
UpperCAmelCase__ = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if return_labels:
if model_class in get_values(_UpperCAmelCase ):
UpperCAmelCase__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_UpperCAmelCase )
UpperCAmelCase__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase )
return inputs_dict
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
UpperCAmelCase__ = NezhaModelTester(self )
UpperCAmelCase__ = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
        ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) = self.model_tester.prepare_config_and_inputs_for_decoder()
UpperCAmelCase__ = None
self.model_tester.create_and_check_model_as_decoder(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , )
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase )
@slow
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ = NezhaModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@slow
@require_torch_gpu
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
UpperCAmelCase__ = True
UpperCAmelCase__ = model_class(config=_UpperCAmelCase )
UpperCAmelCase__ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase )
UpperCAmelCase__ = torch.jit.trace(
_UpperCAmelCase , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(_UpperCAmelCase , os.path.join(_UpperCAmelCase , """bert.pt""" ) )
UpperCAmelCase__ = torch.jit.load(os.path.join(_UpperCAmelCase , """bert.pt""" ) , map_location=_UpperCAmelCase )
loaded(inputs_dict["""input_ids"""].to(_UpperCAmelCase ) , inputs_dict["""attention_mask"""].to(_UpperCAmelCase ) )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
UpperCAmelCase__ = NezhaModel.from_pretrained("""sijunhe/nezha-cn-base""" )
UpperCAmelCase__ = torch.tensor([[0, 1, 2, 3, 4, 5]] )
UpperCAmelCase__ = torch.tensor([[0, 1, 1, 1, 1, 1]] )
with torch.no_grad():
UpperCAmelCase__ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0]
UpperCAmelCase__ = torch.Size((1, 6, 7_68) )
self.assertEqual(output.shape , _UpperCAmelCase )
UpperCAmelCase__ = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _UpperCAmelCase , atol=1E-4 ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
UpperCAmelCase__ = NezhaForMaskedLM.from_pretrained("""sijunhe/nezha-cn-base""" )
UpperCAmelCase__ = torch.tensor([[0, 1, 2, 3, 4, 5]] )
UpperCAmelCase__ = torch.tensor([[1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
UpperCAmelCase__ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0]
UpperCAmelCase__ = torch.Size((1, 6, 2_11_28) )
self.assertEqual(output.shape , _UpperCAmelCase )
UpperCAmelCase__ = torch.tensor(
[[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _UpperCAmelCase , atol=1E-4 ) )
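# Hedged usage sketch mirroring the integration test above (the checkpoint name is
# taken from the test itself):
#
#   model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
#   input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
#   with torch.no_grad():
#       last_hidden = model(input_ids)[0]  # shape (1, 6, 768)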
| 603
| 0
|
"""simple docstring"""
import math
def proth(number):
    # Return the number-th Proth number (k * 2**n + 1 with odd k < 2**n).
    if not isinstance(number , int):
        msg = f'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg)
    if number < 1:
        msg = f'''Input value of [number={number}] must be > 0'''
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # Number of doubling "blocks" needed to reach the requested index.
        block_index = int(math.log(number // 3 , 2 ) ) + 2
        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1 , block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
                proth_index += 1
            increment *= 2
        return proth_list[number - 1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"ValueError: there is no {number}th Proth number")
            continue
        print(f"The {number}th Proth number: {value}")
| 708
|
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
lowerCamelCase__ : Tuple = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=lowerCamelCase_, cache_dir=lowerCamelCase_ )
lowerCamelCase__ : List[str] = [t[-1] for t in os.walk(os.path.join(lowerCamelCase_, os.listdir(lowerCamelCase_ )[0], 'snapshots' ) )]
lowerCamelCase__ : Optional[int] = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Any = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=lowerCamelCase_ )
lowerCamelCase__ : Any = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Optional[int] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Any = 4
lowerCamelCase__ : Any = jax.device_count()
lowerCamelCase__ : List[Any] = num_samples * [prompt]
lowerCamelCase__ : Optional[int] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : int = replicate(lowerCamelCase_ )
lowerCamelCase__ : Any = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = shard(lowerCamelCase_ )
lowerCamelCase__ : int = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 6_4, 6_4, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 4.1_514_745 ) < 1e-3
assert np.abs(np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 49_947.875 ) < 5e-1
lowerCamelCase__ : Union[str, Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(lowerCamelCase_ ) == num_samples
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : List[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='flax', safety_checker=lowerCamelCase_ )
lowerCamelCase__ : int = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : List[str] = jax.random.PRNGKey(0 )
lowerCamelCase__ : int = 5_0
lowerCamelCase__ : List[str] = jax.device_count()
lowerCamelCase__ : Dict = num_samples * [prompt]
lowerCamelCase__ : List[str] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Dict = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = shard(lowerCamelCase_ )
lowerCamelCase__ : str = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.05_652_401) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_383_808.2) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, safety_checker=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : List[Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Union[str, Any] = 5_0
lowerCamelCase__ : Any = jax.device_count()
lowerCamelCase__ : Tuple = num_samples * [prompt]
lowerCamelCase__ : List[str] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Any = replicate(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : int = shard(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.04_003_906) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_373_516.75) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Tuple = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa )
lowerCamelCase__ : Tuple = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Union[str, Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Optional[Any] = 5_0
lowerCamelCase__ : Tuple = jax.device_count()
lowerCamelCase__ : Optional[int] = num_samples * [prompt]
lowerCamelCase__ : str = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Optional[int] = replicate(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = shard(lowerCamelCase_ )
lowerCamelCase__ : List[str] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.04_003_906) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_373_516.75) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = FlaxDDIMScheduler(
beta_start=0.00_085, beta_end=0.012, beta_schedule='scaled_linear', set_alpha_to_one=lowerCamelCase_, steps_offset=1, )
lowerCamelCase__ , lowerCamelCase__ : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, scheduler=lowerCamelCase_, safety_checker=lowerCamelCase_, )
lowerCamelCase__ : List[str] = scheduler.create_state()
lowerCamelCase__ : int = scheduler_state
lowerCamelCase__ : Any = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Optional[Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : int = 5_0
lowerCamelCase__ : Optional[Any] = jax.device_count()
lowerCamelCase__ : Any = num_samples * [prompt]
lowerCamelCase__ : Any = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Union[str, Any] = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Dict = shard(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.045_043_945) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_347_693.5) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : int = jax.device_count()
lowerCamelCase__ : Dict = num_samples * [prompt]
lowerCamelCase__ : str = jax.random.split(jax.random.PRNGKey(0 ), lowerCamelCase_ )
lowerCamelCase__ , lowerCamelCase__ : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, safety_checker=lowerCamelCase_, )
lowerCamelCase__ : Union[str, Any] = replicate(lowerCamelCase_ )
lowerCamelCase__ : Dict = pipeline.prepare_inputs(lowerCamelCase_ )
lowerCamelCase__ : Tuple = shard(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
lowerCamelCase__ : int = images[2, 0, 2_5_6, 1_0:1_7, 1]
# With memory efficient attention
lowerCamelCase__ , lowerCamelCase__ : str = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, safety_checker=lowerCamelCase_, use_memory_efficient_attention=lowerCamelCase_, )
lowerCamelCase__ : Dict = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = pipeline.prepare_inputs(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = shard(lowerCamelCase_ )
lowerCamelCase__ : Any = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images_eff.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
lowerCamelCase__ : Any = images[2, 0, 2_5_6, 1_0:1_7, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
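# All of the tests above share the same data-parallel recipe; a hedged sketch of
# that pattern (names mirror the tests, values are illustrative):
#
#   prompt_ids = pipeline.prepare_inputs(jax.device_count() * [prompt])
#   params = replicate(params)                      # copy weights to every device
#   rngs = jax.random.split(jax.random.PRNGKey(0), jax.device_count())
#   prompt_ids = shard(prompt_ids)                  # split the batch across devices
#   images = pipeline(prompt_ids, params, rngs, num_inference_steps, jit=True).images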
| 696
| 0
|
from ..utils import DummyObject, requires_backends
class lowerCamelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["transformers", "torch", "note_seq"]
def __init__( self : Tuple , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : Optional[int] ):
requires_backends(self , ['transformers', 'torch', 'note_seq'] )
@classmethod
def lowerCAmelCase_ ( cls : List[Any] , *_lowerCAmelCase : Any , **_lowerCAmelCase : Union[str, Any] ):
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
@classmethod
def lowerCAmelCase_ ( cls : List[str] , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : Optional[Any] ):
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
| 31
|
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    parser.add_argument(
        '''--txt2img_unclip''',
        default='''kakaobrain/karlo-v1-alpha''',
        type=str,
        required=False,
        help='''The pretrained txt2img unclip.''',
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('''openai/clip-vit-large-patch14''')

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
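# Example invocation (the script filename and output path are illustrative):
#   python convert_unclip_txt2img_to_image_variation.py \
#       --txt2img_unclip kakaobrain/karlo-v1-alpha \
#       --dump_path ./karlo-image-variations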
| 176
| 0
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ : List[str] = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE ( lowercase_ : int ):
lowercase = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
lowercase = [144, 192, 240]
lowercase = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
lowercase = [96, 120, 144]
lowercase = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
lowercase = [64, 80, 96]
lowercase = [16, 16, 24, 48, 64, 80, 320]
lowercase = 0.05
lowercase = 2.0
if mobilevit_name.startswith("""deeplabv3_""" ):
lowercase = 512
lowercase = 16
lowercase = 21
lowercase = """pascal-voc-id2label.json"""
else:
lowercase = 1000
lowercase = """imagenet-1k-id2label.json"""
lowercase = """huggingface/label-files"""
lowercase = json.load(open(hf_hub_download(lowercase_ , lowercase_ , repo_type="""dataset""" ) , """r""" ) )
lowercase = {int(lowercase_ ): v for k, v in idalabel.items()}
lowercase = idalabel
lowercase = {v: k for k, v in idalabel.items()}
return config
def SCREAMING_SNAKE_CASE ( lowercase_ : Any , lowercase_ : Any=False ):
for i in range(1 , 6 ):
if F"""layer_{i}.""" in name:
lowercase = name.replace(F"""layer_{i}.""" , F"""encoder.layer.{i - 1}.""" )
if "conv_1." in name:
lowercase = name.replace("""conv_1.""" , """conv_stem.""" )
if ".block." in name:
lowercase = name.replace(""".block.""" , """.""" )
if "exp_1x1" in name:
lowercase = name.replace("""exp_1x1""" , """expand_1x1""" )
if "red_1x1" in name:
lowercase = name.replace("""red_1x1""" , """reduce_1x1""" )
if ".local_rep.conv_3x3." in name:
lowercase = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""" )
if ".local_rep.conv_1x1." in name:
lowercase = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""" )
if ".norm." in name:
lowercase = name.replace(""".norm.""" , """.normalization.""" )
if ".conv." in name:
lowercase = name.replace(""".conv.""" , """.convolution.""" )
if ".conv_proj." in name:
lowercase = name.replace(""".conv_proj.""" , """.conv_projection.""" )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
lowercase = name.replace(F""".{i}.{j}.""" , F""".{i}.layer.{j}.""" )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
lowercase = name.replace(F""".{i}.{j}.""" , F""".{i}.""" )
if "expand_1x1" in name:
lowercase = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""" )
if "conv_3x3" in name:
lowercase = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""" )
if "reduce_1x1" in name:
lowercase = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""" )
for i in range(2 , 5 ):
if F""".global_rep.{i}.weight""" in name:
lowercase = name.replace(F""".global_rep.{i}.weight""" , """.layernorm.weight""" )
if F""".global_rep.{i}.bias""" in name:
lowercase = name.replace(F""".global_rep.{i}.bias""" , """.layernorm.bias""" )
if ".global_rep." in name:
lowercase = name.replace(""".global_rep.""" , """.transformer.""" )
if ".pre_norm_mha.0." in name:
lowercase = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""" )
if ".pre_norm_mha.1.out_proj." in name:
lowercase = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""" )
if ".pre_norm_ffn.0." in name:
lowercase = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""" )
if ".pre_norm_ffn.1." in name:
lowercase = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""" )
if ".pre_norm_ffn.4." in name:
lowercase = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""" )
if ".transformer." in name:
lowercase = name.replace(""".transformer.""" , """.transformer.layer.""" )
if ".aspp_layer." in name:
lowercase = name.replace(""".aspp_layer.""" , """.""" )
if ".aspp_pool." in name:
lowercase = name.replace(""".aspp_pool.""" , """.""" )
if "seg_head." in name:
lowercase = name.replace("""seg_head.""" , """segmentation_head.""" )
if "segmentation_head.classifier.classifier." in name:
lowercase = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""" )
if "classifier.fc." in name:
lowercase = name.replace("""classifier.fc.""" , """classifier.""" )
elif (not base_model) and ("segmentation_head." not in name):
lowercase = """mobilevit.""" + name
return name
def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[Any] , lowercase_ : List[Any] , lowercase_ : str=False ):
if base_model:
lowercase = """"""
else:
lowercase = """mobilevit."""
for key in orig_state_dict.copy().keys():
lowercase = orig_state_dict.pop(lowercase_ )
if key[:8] == "encoder.":
lowercase = key[8:]
if "qkv" in key:
lowercase = key.split(""".""" )
lowercase = int(key_split[0][6:] ) - 1
lowercase = int(key_split[3] )
lowercase = model.get_submodule(F"""{model_prefix}encoder.layer.{layer_num}""" )
lowercase = layer.transformer.layer[transformer_num].attention.attention.all_head_size
lowercase = (
F"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."""
)
if "weight" in key:
lowercase = val[:dim, :]
lowercase = val[dim : dim * 2, :]
lowercase = val[-dim:, :]
else:
lowercase = val[:dim]
lowercase = val[dim : dim * 2]
lowercase = val[-dim:]
else:
lowercase = val
return orig_state_dict
def SCREAMING_SNAKE_CASE ( ):
lowercase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowercase = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( lowercase_ : Dict , lowercase_ : List[Any] , lowercase_ : Any , lowercase_ : List[str]=False ):
lowercase = get_mobilevit_config(lowercase_ )
# load original state_dict
lowercase = torch.load(lowercase_ , map_location="""cpu""" )
# load 🤗 model
if mobilevit_name.startswith("""deeplabv3_""" ):
lowercase = MobileViTForSemanticSegmentation(lowercase_ ).eval()
else:
lowercase = MobileViTForImageClassification(lowercase_ ).eval()
lowercase = convert_state_dict(lowercase_ , lowercase_ )
model.load_state_dict(lowercase_ )
# Check outputs on an image, prepared by MobileViTImageProcessor
lowercase = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
lowercase = image_processor(images=prepare_img() , return_tensors="""pt""" )
lowercase = model(**lowercase_ )
lowercase = outputs.logits
if mobilevit_name.startswith("""deeplabv3_""" ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
lowercase = torch.tensor(
[
[[6.2_065, 6.1_292, 6.2_070], [6.1_079, 6.1_254, 6.1_747], [6.0_042, 6.1_071, 6.1_034]],
[[-6.9_253, -6.8_653, -7.0_398], [-7.3_218, -7.3_983, -7.3_670], [-7.1_961, -7.2_482, -7.1_569]],
[[-4.4_723, -4.4_348, -4.3_769], [-5.3_629, -5.4_632, -5.4_598], [-5.1_587, -5.3_402, -5.5_059]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
lowercase = torch.tensor(
[
[[5.4_449, 5.5_733, 5.6_314], [5.1_815, 5.3_930, 5.5_963], [5.1_656, 5.4_333, 5.4_853]],
[[-9.4_423, -9.7_766, -9.6_714], [-9.1_581, -9.5_720, -9.5_519], [-9.1_006, -9.6_458, -9.5_703]],
[[-7.7_721, -7.3_716, -7.1_583], [-8.4_599, -8.0_624, -7.7_944], [-8.4_172, -7.8_366, -7.5_025]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
lowercase = torch.tensor(
[
[[6.9_811, 6.9_743, 7.3_123], [7.1_777, 7.1_931, 7.3_938], [7.5_633, 7.8_050, 7.8_901]],
[[-10.5_536, -10.2_332, -10.2_924], [-10.2_336, -9.8_624, -9.5_964], [-10.8_840, -10.8_158, -10.6_659]],
[[-3.4_938, -3.0_631, -2.8_620], [-3.4_205, -2.8_135, -2.6_875], [-3.4_179, -2.7_945, -2.8_750]],
] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3, :3, :3] , lowercase_ , atol=1E-4 )
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
lowercase = torch.tensor([-0.9_866, 0.2_392, -1.1_241] )
elif mobilevit_name == "mobilevit_xs":
lowercase = torch.tensor([-2.4_761, -0.9_399, -1.9_587] )
elif mobilevit_name == "mobilevit_xxs":
lowercase = torch.tensor([-1.9_364, -1.2_327, -0.4_653] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3] , lowercase_ , atol=1E-4 )
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
print(F"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowercase_ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(lowercase_ )
if push_to_hub:
lowercase = {
"""mobilevit_s""": """mobilevit-small""",
"""mobilevit_xs""": """mobilevit-x-small""",
"""mobilevit_xxs""": """mobilevit-xx-small""",
"""deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""",
"""deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""",
"""deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""",
}
print("""Pushing to the hub...""" )
lowercase = model_mapping[mobilevit_name]
image_processor.push_to_hub(lowercase_ , organization="""apple""" )
model.push_to_hub(lowercase_ , organization="""apple""" )
if __name__ == "__main__":
lowercase_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowercase_ : List[str] = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
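# Example invocation (paths are illustrative):
#   python convert_mobilevit_checkpoint.py \
#       --mobilevit_name mobilevit_s \
#       --checkpoint_path ./mobilevit_s.pt \
#       --pytorch_dump_folder_path ./mobilevit-small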
| 712
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ : Optional[Any] = logging.get_logger(__name__)
lowercase_ : int = {
'''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}
class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
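# Illustrative check (not part of the original file): the attribute_map above
# makes the generic config names resolve to the GPT-2-style field names.
_demo_config = GPTBigCodeConfig(n_embd=256, n_layer=4, n_head=4)
assert _demo_config.hidden_size == 256      # alias for n_embd via attribute_map
assert _demo_config.num_hidden_layers == 4  # alias for n_layer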
| 653
| 0
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    """Wraps a ChineseCLIP image processor and a BERT tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
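# Usage sketch (illustrative, not part of the original file): the checkpoint
# name and image path below are only examples.
#
# from PIL import Image
# from transformers import ChineseCLIPProcessor
#
# processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
# inputs = processor(text=["一张猫的照片"], images=Image.open("cat.png"), return_tensors="pt")
# # -> input_ids / attention_mask from the tokenizer plus pixel_values from the image processor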
| 424
|
class EditDistance:
    """Edit (Levenshtein) distance between two strings, solved both top-down and bottom-up with dynamic programming."""

    def __init__(self):
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m, n):
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)

            return self.dp[m][n]

    def min_dist_top_down(self, word1, word2):
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]
        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1, word2):
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]

        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]


if __name__ == "__main__":
    solver = EditDistance()

    print("****************** Testing Edit Distance DP Algorithm ******************")
    print()

    S1 = input("Enter the first string: ").strip()
    S2 = input("Enter the second string: ").strip()

    print()
    print(f"The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}")
    print(f"The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}")
    print()
    print("*************** End of Testing Edit Distance DP Algorithm ***************")
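# Worked example (illustrative, not part of the original file):
# "kitten" -> "sitting" needs 3 edits (substitute k->s, substitute e->i,
# insert g), so both implementations should agree on 3.
_demo_solver = EditDistance()
assert _demo_solver.min_dist_top_down("kitten", "sitting") == 3
assert _demo_solver.min_dist_bottom_up("kitten", "sitting") == 3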
| 424
| 1
|
'''simple docstring'''
import logging
from transformers import PretrainedConfig
a_ : str = logging.getLogger(__name__)
a_ : List[str] = {
"bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}
class BertAbsConfig(PretrainedConfig):
    model_type = "bertabs"

    def __init__(
        self,
        vocab_size=30522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos

        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout

        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 532
|
'''simple docstring'''
from typing import Any
def mode(input_list: list) -> list[Any]:
    """Return every value whose count equals the maximum count, sorted."""
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})
if __name__ == "__main__":
import doctest
doctest.testmod()
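# Worked examples (illustrative, not part of the original file):
assert mode([2, 2, 3, 1]) == [2]     # single mode
assert mode([1, 1, 2, 2]) == [1, 2]  # ties: every maximal value is returned
assert mode([]) == []                # empty input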
| 532
| 1
|
"""simple docstring"""
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)

        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val

        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                self.idx_of_element[array[idx]], self.idx_of_element[array[smallest]] = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less than current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])


r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)

# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)

# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
    print(i)

print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
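# Usage sketch (illustrative, not part of the original file): repeatedly
# removing the root yields the nodes in ascending order of val, i.e. a heap
# sort over the demo nodes built above.
sorted_vals = []
while not my_min_heap.is_empty():
    sorted_vals.append(my_min_heap.remove().val)
print("heap-sorted vals:", sorted_vals)  # [-17, -1, 1, 3, 4]
assert sorted_vals == sorted(sorted_vals)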
| 153
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''transfo-xl-wt103''': '''https://huggingface.co/transfo-xl-wt103/resolve/main/config.json''',
}
class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
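# Illustrative check (not part of the original file): Transformer-XL reports no
# fixed sequence-length limit, and assigning one is rejected.
_demo_config = TransfoXLConfig()
assert _demo_config.max_position_embeddings == -1
# _demo_config.max_position_embeddings = 512  # would raise NotImplementedError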
| 247
| 0
|
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__A : Optional[Any] = logging.get_logger(__name__)
__A : Union[str, Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all BART models at https://huggingface.co/models?filter=bart
__A : List[str] = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
}
__A : Union[str, Any] = {
'facebook/bart-base': 10_24,
'facebook/bart-large': 10_24,
'facebook/bart-large-mnli': 10_24,
'facebook/bart-large-cnn': 10_24,
'facebook/bart-large-xsum': 10_24,
'yjernite/bart_eli5': 10_24,
}
@lru_cache()
def bytes_to_unicode():
    # Maps every byte (0-255) to a printable unicode character, avoiding the
    # whitespace/control characters the BPE code cannot handle.
    bs = (
        list(range(ord('!'), ord('~') + 1)) + list(range(ord('¡'), ord('¬') + 1)) + list(range(ord('®'), ord('ÿ') + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    # Returns the set of adjacent symbol pairs in a word, where the word is a
    # tuple of (variable-length string) symbols.
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
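# Worked example (illustrative, not part of the original file):
# bytes_to_unicode() covers every byte, and get_pairs() enumerates the adjacent
# symbol pairs that BPE merges operate on.
assert len(bytes_to_unicode()) == 256
assert get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}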
class _UpperCamelCase ( _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE:List[Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE:str = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE:Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE:Dict = ['input_ids', 'attention_mask']
def __init__( self , _a , _a , _a="replace" , _a="<s>" , _a="</s>" , _a="</s>" , _a="<s>" , _a="<unk>" , _a="<pad>" , _a="<mask>" , _a=False , **_a , ):
"""simple docstring"""
a__ = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else bos_token
a__ = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else eos_token
a__ = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else sep_token
a__ = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else cls_token
a__ = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else unk_token
a__ = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
a__ = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
super().__init__(
errors=_a , bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , cls_token=_a , pad_token=_a , mask_token=_a , add_prefix_space=_a , **_a , )
with open(_a , encoding='utf-8' ) as vocab_handle:
a__ = json.load(_a )
a__ = {v: k for k, v in self.encoder.items()}
a__ = errors # how to handle errors in decoding
a__ = bytes_to_unicode()
a__ = {v: k for k, v in self.byte_encoder.items()}
with open(_a , encoding='utf-8' ) as merges_handle:
a__ = merges_handle.read().split('\n' )[1:-1]
a__ = [tuple(merge.split() ) for merge in bpe_merges]
a__ = dict(zip(_a , range(len(_a ) ) ) )
a__ = {}
a__ = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
a__ = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
def lowercase__ ( self ):
"""simple docstring"""
return len(self.encoder )
def lowercase__ ( self ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def lowercase__ ( self , _a ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
a__ = tuple(_a )
a__ = get_pairs(_a )
if not pairs:
return token
while True:
a__ = min(_a , key=lambda _a : self.bpe_ranks.get(_a , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
a__ , a__ = bigram
a__ = []
a__ = 0
while i < len(_a ):
try:
a__ = word.index(_a , _a )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
a__ = j
if word[i] == first and i < len(_a ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
a__ = tuple(_a )
a__ = new_word
if len(_a ) == 1:
break
else:
a__ = get_pairs(_a )
a__ = ' '.join(_a )
a__ = word
return word
def lowercase__ ( self , _a ):
"""simple docstring"""
a__ = []
for token in re.findall(self.pat , _a ):
a__ = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_a ).split(' ' ) )
return bpe_tokens
def lowercase__ ( self , _a ):
"""simple docstring"""
return self.encoder.get(_a , self.encoder.get(self.unk_token ) )
def lowercase__ ( self , _a ):
"""simple docstring"""
return self.decoder.get(_a )
def lowercase__ ( self , _a ):
"""simple docstring"""
a__ = ''.join(_a )
a__ = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def lowercase__ ( self , _a , _a = None ):
"""simple docstring"""
if not os.path.isdir(_a ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
a__ = os.path.join(
_a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
a__ = os.path.join(
_a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(_a , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_a , ensure_ascii=_a ) + '\n' )
a__ = 0
with open(_a , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
' Please check that the tokenizer is not corrupted!' )
a__ = token_index
writer.write(' '.join(_a ) + '\n' )
index += 1
return vocab_file, merge_file
def lowercase__ ( self , _a , _a = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
a__ = [self.cls_token_id]
a__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowercase__ ( self , _a , _a = None , _a = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a )) + [1]
def lowercase__ ( self , _a , _a = None ):
"""simple docstring"""
a__ = [self.sep_token_id]
a__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowercase__ ( self , _a , _a=False , **_a ):
"""simple docstring"""
a__ = kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_a ) > 0 and not text[0].isspace()):
a__ = ' ' + text
return (text, kwargs)
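# Illustration (not part of the original file): _toy_bpe_step is a hypothetical
# helper that condenses one iteration of the merge loop inside bpe() above:
# pick the highest-priority pair from the ranks table and fuse every occurrence.
def _toy_bpe_step(word, bpe_ranks):
    pairs = get_pairs(word)
    bigram = min(pairs, key=lambda pair: bpe_ranks.get(pair, float('inf')))
    if bigram not in bpe_ranks:
        return word  # nothing left to merge
    first, second = bigram
    new_word, i = [], 0
    while i < len(word):
        if i < len(word) - 1 and (word[i], word[i + 1]) == bigram:
            new_word.append(first + second)
            i += 2
        else:
            new_word.append(word[i])
            i += 1
    return tuple(new_word)

# With ranks {("l", "o"): 0}, the symbols of "low" merge into ("lo", "w"):
assert _toy_bpe_step(("l", "o", "w"), {("l", "o"): 0}) == ("lo", "w")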
| 126
|
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class _UpperCamelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , _a , _a , _a = None , _a = None ):
"""simple docstring"""
super().__init__()
a__ = pad_token_id
a__ = max_length
a__ = vocab
a__ = merges
a__ = BytePairTokenizer(_a , _a , sequence_length=_a )
@classmethod
def lowercase__ ( cls , _a , *_a , **_a ):
"""simple docstring"""
a__ = [' '.join(_a ) for m in tokenizer.bpe_ranks.keys()]
a__ = tokenizer.get_vocab()
return cls(_a , _a , *_a , **_a )
@classmethod
def lowercase__ ( cls , _a , *_a , **_a ):
"""simple docstring"""
a__ = GPTaTokenizer.from_pretrained(_a , *_a , **_a )
return cls.from_tokenizer(_a , *_a , **_a )
@classmethod
def lowercase__ ( cls , _a ):
"""simple docstring"""
return cls(**_a )
def lowercase__ ( self ):
"""simple docstring"""
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def lowercase__ ( self , _a , _a = None ):
"""simple docstring"""
a__ = self.tf_tokenizer(_a )
a__ = tf.ones_like(_a )
if self.pad_token_id is not None:
# pad the tokens up to max length
a__ = max_length if max_length is not None else self.max_length
if max_length is not None:
a__ , a__ = pad_model_inputs(
_a , max_seq_length=_a , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 126
| 1
|
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
lowerCAmelCase_ = logging.getLogger(__name__)
@dataclass
class _lowerCAmelCase :
A__ = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
A__ = field(
default=_lowercase , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
A__ = field(
default=_lowercase , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
A__ = field(
default=_lowercase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
A__ = field(
default=_lowercase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
A__ = field(
default=_lowercase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of prediction examples to this '
'value if set.'
)
} , )
@dataclass
class _lowerCAmelCase :
A__ = field(
default=_lowercase , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
A__ = field(
default=_lowercase , metadata={'help': 'Evaluation language. Also train language if `train_language` is set to None.'} )
A__ = field(
default=_lowercase , metadata={'help': 'Train language if it is different from the evaluation language.'} )
A__ = field(
default=_lowercase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
A__ = field(
default=_lowercase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
A__ = field(
default=_lowercase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
A__ = field(
default=_lowercase , metadata={'help': 'arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()'} , )
A__ = field(
default=_lowercase , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
A__ = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
A__ = field(
default=_lowercase , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
A__ = field(
default=_lowercase , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def __lowerCAmelCase ( ) -> Dict:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowerCAmelCase__ : Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[str] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_xnli''' , UpperCamelCase )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowerCAmelCase__ : List[str] = training_args.get_process_log_level()
logger.setLevel(UpperCamelCase )
datasets.utils.logging.set_verbosity(UpperCamelCase )
transformers.utils.logging.set_verbosity(UpperCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
lowerCAmelCase__ : Dict = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCAmelCase__ : List[str] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
lowerCAmelCase__ : str = load_dataset(
'''xnli''' , model_args.language , split='''train''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
lowerCAmelCase__ : List[Any] = load_dataset(
'''xnli''' , model_args.train_language , split='''train''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
lowerCAmelCase__ : str = train_dataset.features['''label'''].names
if training_args.do_eval:
lowerCAmelCase__ : List[Any] = load_dataset(
'''xnli''' , model_args.language , split='''validation''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
lowerCAmelCase__ : Tuple = eval_dataset.features['''label'''].names
if training_args.do_predict:
lowerCAmelCase__ : Any = load_dataset(
'''xnli''' , model_args.language , split='''test''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
lowerCAmelCase__ : Dict = predict_dataset.features['''label'''].names
# Labels
lowerCAmelCase__ : List[str] = len(UpperCamelCase )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCAmelCase__ : int = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCamelCase , idalabel={str(UpperCamelCase ): label for i, label in enumerate(UpperCamelCase )} , labelaid={label: i for i, label in enumerate(UpperCamelCase )} , finetuning_task='''xnli''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCAmelCase__ : Optional[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCAmelCase__ : List[str] = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=UpperCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
lowerCAmelCase__ : List[Any] = '''max_length'''
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
lowerCAmelCase__ : Optional[int] = False
def preprocess_function(UpperCamelCase ):
# Tokenize the texts
return tokenizer(
examples['''premise'''] , examples['''hypothesis'''] , padding=UpperCamelCase , max_length=data_args.max_seq_length , truncation=UpperCamelCase , )
if training_args.do_train:
if data_args.max_train_samples is not None:
lowerCAmelCase__ : int = min(len(UpperCamelCase ) , data_args.max_train_samples )
lowerCAmelCase__ : Dict = train_dataset.select(range(UpperCamelCase ) )
with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
lowerCAmelCase__ : Optional[int] = train_dataset.map(
UpperCamelCase , batched=UpperCamelCase , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on train dataset''' , )
# Log a few random samples from the training set:
for index in random.sample(range(len(UpperCamelCase ) ) , 3 ):
logger.info(F"""Sample {index} of the training set: {train_dataset[index]}.""" )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
lowerCAmelCase__ : List[str] = min(len(UpperCamelCase ) , data_args.max_eval_samples )
lowerCAmelCase__ : List[Any] = eval_dataset.select(range(UpperCamelCase ) )
with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
lowerCAmelCase__ : int = eval_dataset.map(
UpperCamelCase , batched=UpperCamelCase , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on validation dataset''' , )
if training_args.do_predict:
if data_args.max_predict_samples is not None:
lowerCAmelCase__ : Dict = min(len(UpperCamelCase ) , data_args.max_predict_samples )
lowerCAmelCase__ : Any = predict_dataset.select(range(UpperCamelCase ) )
with training_args.main_process_first(desc='''prediction dataset map pre-processing''' ):
lowerCAmelCase__ : Union[str, Any] = predict_dataset.map(
UpperCamelCase , batched=UpperCamelCase , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on prediction dataset''' , )
# Get the metric function
lowerCAmelCase__ : Optional[int] = evaluate.load('''xnli''' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(UpperCamelCase ):
lowerCAmelCase__ : List[str] = p.predictions[0] if isinstance(p.predictions , UpperCamelCase ) else p.predictions
lowerCAmelCase__ : str = np.argmax(UpperCamelCase , axis=1 )
return metric.compute(predictions=UpperCamelCase , references=p.label_ids )
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
lowerCAmelCase__ : List[str] = default_data_collator
elif training_args.fpaa:
lowerCAmelCase__ : Tuple = DataCollatorWithPadding(UpperCamelCase , pad_to_multiple_of=8 )
else:
lowerCAmelCase__ : Dict = None
# Initialize our Trainer
lowerCAmelCase__ : Optional[int] = Trainer(
model=UpperCamelCase , args=UpperCamelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=UpperCamelCase , tokenizer=UpperCamelCase , data_collator=UpperCamelCase , )
# Training
if training_args.do_train:
lowerCAmelCase__ : Any = None
if training_args.resume_from_checkpoint is not None:
lowerCAmelCase__ : List[str] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCAmelCase__ : List[Any] = last_checkpoint
lowerCAmelCase__ : Any = trainer.train(resume_from_checkpoint=UpperCamelCase )
lowerCAmelCase__ : Dict = train_result.metrics
lowerCAmelCase__ : Optional[int] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(UpperCamelCase )
)
lowerCAmelCase__ : int = min(UpperCamelCase , len(UpperCamelCase ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('''train''' , UpperCamelCase )
trainer.save_metrics('''train''' , UpperCamelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowerCAmelCase__ : int = trainer.evaluate(eval_dataset=UpperCamelCase )
lowerCAmelCase__ : Dict = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(UpperCamelCase )
lowerCAmelCase__ : List[str] = min(UpperCamelCase , len(UpperCamelCase ) )
trainer.log_metrics('''eval''' , UpperCamelCase )
trainer.save_metrics('''eval''' , UpperCamelCase )
# Prediction
if training_args.do_predict:
logger.info('''*** Predict ***''' )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = trainer.predict(UpperCamelCase , metric_key_prefix='''predict''' )
lowerCAmelCase__ : Any = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(UpperCamelCase )
)
lowerCAmelCase__ : int = min(UpperCamelCase , len(UpperCamelCase ) )
trainer.log_metrics('''predict''' , UpperCamelCase )
trainer.save_metrics('''predict''' , UpperCamelCase )
lowerCAmelCase__ : Tuple = np.argmax(UpperCamelCase , axis=1 )
lowerCAmelCase__ : List[Any] = os.path.join(training_args.output_dir , '''predictions.txt''' )
if trainer.is_world_process_zero():
with open(UpperCamelCase , '''w''' ) as writer:
writer.write('''index\tprediction\n''' )
for index, item in enumerate(UpperCamelCase ):
lowerCAmelCase__ : Any = label_list[item]
writer.write(F"""{index}\t{item}\n""" )
if __name__ == "__main__":
main()
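# Illustration (not part of the original script): the shape of what
# compute_metrics() above works with. The numbers are made up, and the "xnli"
# metric is assumed to reduce to plain accuracy here.
#
# import numpy as np
# logits = np.array([[0.1, 2.0, -1.0], [1.5, 0.2, 0.3]])  # (batch, num_labels)
# preds = np.argmax(logits, axis=1)                        # -> array([1, 0])
# metric.compute(predictions=preds, references=[1, 1])     # -> {"accuracy": 0.5}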
| 678
|
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class _lowerCAmelCase ( unittest.TestCase ):
A__ = MODEL_FOR_CAUSAL_LM_MAPPING
A__ = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def __magic_name__( self ):
lowerCAmelCase__ : Tuple = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' )
# Using `do_sample=False` to force deterministic output
lowerCAmelCase__ : Optional[int] = text_generator('''This is a test''' , do_sample=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
] , )
lowerCAmelCase__ : List[str] = text_generator(['''This is a test''', '''This is a second test'''] )
self.assertEqual(
__UpperCAmelCase , [
[
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'''
''' oscope. oscope. FiliFili@@'''
)
}
],
] , )
lowerCAmelCase__ : str = text_generator('''This is a test''' , do_sample=__UpperCAmelCase , num_return_sequences=2 , return_tensors=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
{'''generated_token_ids''': ANY(__UpperCAmelCase )},
{'''generated_token_ids''': ANY(__UpperCAmelCase )},
] , )
lowerCAmelCase__ : List[Any] = text_generator.model.config.eos_token_id
lowerCAmelCase__ : List[Any] = '''<pad>'''
lowerCAmelCase__ : List[Any] = text_generator(
['''This is a test''', '''This is a second test'''] , do_sample=__UpperCAmelCase , num_return_sequences=2 , batch_size=2 , return_tensors=__UpperCAmelCase , )
self.assertEqual(
__UpperCAmelCase , [
[
{'''generated_token_ids''': ANY(__UpperCAmelCase )},
{'''generated_token_ids''': ANY(__UpperCAmelCase )},
],
[
{'''generated_token_ids''': ANY(__UpperCAmelCase )},
{'''generated_token_ids''': ANY(__UpperCAmelCase )},
],
] , )
@require_tf
def __magic_name__( self ):
lowerCAmelCase__ : int = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' )
# Using `do_sample=False` to force deterministic output
lowerCAmelCase__ : List[Any] = text_generator('''This is a test''' , do_sample=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
] , )
lowerCAmelCase__ : List[str] = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
[
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'''
''' Cannes 閲閲Cannes Cannes Cannes 攵 please,'''
)
}
],
] , )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : Dict = TextGenerationPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase )
return text_generator, ["This is a test", "Another test"]
def __magic_name__( self ):
lowerCAmelCase__ : Any = '''Hello I believe in'''
lowerCAmelCase__ : List[Any] = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
lowerCAmelCase__ : Optional[int] = text_generator(__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , )
lowerCAmelCase__ : List[str] = text_generator(__UpperCAmelCase , stop_sequence=''' fe''' )
self.assertEqual(__UpperCAmelCase , [{'''generated_text''': '''Hello I believe in fe'''}] )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : str = text_generator.model
lowerCAmelCase__ : Optional[int] = text_generator.tokenizer
lowerCAmelCase__ : Tuple = text_generator('''This is a test''' )
self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
lowerCAmelCase__ : Optional[int] = text_generator('''This is a test''' , return_full_text=__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
lowerCAmelCase__ : Dict = pipeline(task='''text-generation''' , model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , return_full_text=__UpperCAmelCase )
lowerCAmelCase__ : Dict = text_generator('''This is a test''' )
self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
lowerCAmelCase__ : List[str] = text_generator('''This is a test''' , return_full_text=__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
lowerCAmelCase__ : Optional[int] = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
[{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}],
[{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}],
] , )
if text_generator.tokenizer.pad_token is not None:
lowerCAmelCase__ : List[str] = text_generator(
['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
[{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}],
[{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}],
] , )
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ : Any = text_generator('''test''' , return_full_text=__UpperCAmelCase , return_text=__UpperCAmelCase )
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ : Optional[int] = text_generator('''test''' , return_full_text=__UpperCAmelCase , return_tensors=__UpperCAmelCase )
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ : str = text_generator('''test''' , return_text=__UpperCAmelCase , return_tensors=__UpperCAmelCase )
# Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
lowerCAmelCase__ : str = text_generator('''''' )
self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
lowerCAmelCase__ : List[str] = text_generator('''''' )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
lowerCAmelCase__ : Optional[Any] = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM''']
if (
tokenizer.model_max_length < 1_0000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('''This is a test''' * 500 , max_new_tokens=20 )
lowerCAmelCase__ : Optional[Any] = text_generator('''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(__UpperCAmelCase ):
text_generator(
'''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def __magic_name__( self ):
import torch
# Classic `model_kwargs`
lowerCAmelCase__ : List[str] = pipeline(
model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
lowerCAmelCase__ : Any = pipe('''This is a test''' )
self.assertEqual(
__UpperCAmelCase , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
lowerCAmelCase__ : Dict = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
lowerCAmelCase__ : Union[str, Any] = pipe('''This is a test''' )
self.assertEqual(
__UpperCAmelCase , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
lowerCAmelCase__ : str = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
lowerCAmelCase__ : Any = pipe('''This is a test''' )
self.assertEqual(
__UpperCAmelCase , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
@require_torch
@require_torch_gpu
def __magic_name__( self ):
import torch
lowerCAmelCase__ : List[str] = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.floataa )
pipe('''This is a test''' )
@require_torch
@require_accelerate
@require_torch_gpu
def __magic_name__( self ):
import torch
lowerCAmelCase__ : Any = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.floataa )
pipe('''This is a test''' , do_sample=__UpperCAmelCase , top_p=0.5 )
def __magic_name__( self ):
lowerCAmelCase__ : int = '''Hello world'''
lowerCAmelCase__ : Union[str, Any] = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
if text_generator.model.framework == "tf":
lowerCAmelCase__ : List[Any] = logging.get_logger('''transformers.generation.tf_utils''' )
else:
lowerCAmelCase__ : Dict = logging.get_logger('''transformers.generation.utils''' )
lowerCAmelCase__ : Optional[Any] = '''Both `max_new_tokens`''' # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(__UpperCAmelCase ) as cl:
lowerCAmelCase__ : List[str] = text_generator(__UpperCAmelCase , max_length=10 , max_new_tokens=1 )
self.assertIn(__UpperCAmelCase , cl.out )
# The user only sets one -> no warning
with CaptureLogger(__UpperCAmelCase ) as cl:
lowerCAmelCase__ : Any = text_generator(__UpperCAmelCase , max_new_tokens=1 )
self.assertNotIn(__UpperCAmelCase , cl.out )
with CaptureLogger(__UpperCAmelCase ) as cl:
lowerCAmelCase__ : Union[str, Any] = text_generator(__UpperCAmelCase , max_length=10 )
self.assertNotIn(__UpperCAmelCase , cl.out )
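# Usage sketch (illustrative, not part of the original test file): the minimal
# call pattern exercised by the tests above; the checkpoint is the same tiny
# test model, and do_sample=False keeps the output deterministic.
#
# from transformers import pipeline
#
# text_generator = pipeline("text-generation", model="sshleifer/tiny-ctrl")
# print(text_generator("This is a test", do_sample=False)[0]["generated_text"])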
| 678
| 1
|
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Optional[int] , __lowercase : Optional[int] , __lowercase : List[str]=3 , __lowercase : Optional[int]=32 , __lowercase : Tuple=3 , __lowercase : Tuple=10 , __lowercase : Dict=[10, 20, 30, 40] , __lowercase : Tuple=[1, 1, 2, 1] , __lowercase : Union[str, Any]=True , __lowercase : Any=True , __lowercase : List[Any]="relu" , __lowercase : List[Any]=3 , __lowercase : List[str]=None , ):
'''simple docstring'''
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = image_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = embeddings_size
UpperCAmelCase_ = hidden_sizes
UpperCAmelCase_ = depths
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = scope
UpperCAmelCase_ = len(__lowercase )
def SCREAMING_SNAKE_CASE ( self : int ):
'''simple docstring'''
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ = self.get_config()
return config, pixel_values
def SCREAMING_SNAKE_CASE ( self : str ):
'''simple docstring'''
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowercase : int , __lowercase : str ):
'''simple docstring'''
UpperCAmelCase_ = FlaxRegNetModel(config=__lowercase )
UpperCAmelCase_ = model(__lowercase )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowercase : List[str] , __lowercase : List[str] ):
'''simple docstring'''
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = FlaxRegNetForImageClassification(config=__lowercase )
UpperCAmelCase_ = model(__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_flax
class _UpperCamelCase ( A_ , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False
    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)
                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)
                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_flax
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")
        outputs = model(**inputs)
        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
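# --- Illustrative sketch (an addition, not part of the original test suite):
# test_jit_compilation above relies on jax.jit-compiled calls matching eager
# execution; the same check works on any pure function. `scale` is a
# hypothetical stand-in for the model's __call__.
if __name__ == "__main__":
    import jax
    import jax.numpy as jnp

    def scale(x):
        return 2.0 * x

    jitted_scale = jax.jit(scale)
    x = jnp.ones((1, 224, 224, 3))
    with jax.disable_jit():
        eager_out = scale(x)
    assert jnp.allclose(jitted_scale(x), eager_out)  # compiled and eager paths must agree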
| 486
|
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
    from transformers import (
        AutoConfig,
        BertConfig,
        GPT2Config,
        T5Config,
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
    )
    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
    from transformers import (
        AutoModel,
        AutoModelForCausalLM,
        AutoModelForMaskedLM,
        AutoModelForPreTraining,
        AutoModelForQuestionAnswering,
        AutoModelForSeq2SeqLM,
        AutoModelForSequenceClassification,
        AutoModelWithLMHead,
        BertForMaskedLM,
        BertForPreTraining,
        BertForQuestionAnswering,
        BertForSequenceClassification,
        BertModel,
        GPT2LMHeadModel,
        RobertaForMaskedLM,
        T5ForConditionalGeneration,
    )
@is_pt_tf_cross_test
class TFPTAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = TFAutoModel.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertModel)
            model = AutoModel.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertModel)
    @slow
    def test_model_for_pretraining_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = TFAutoModelForPreTraining.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForPreTraining)
            model = AutoModelForPreTraining.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForPreTraining)
    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)
            model = TFAutoModelForCausalLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)
            model = AutoModelForCausalLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, GPT2LMHeadModel)
    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = TFAutoModelWithLMHead.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)
            model = AutoModelWithLMHead.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)
    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = TFAutoModelForMaskedLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)
            model = AutoModelForMaskedLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)
    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)
            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)
            model = AutoModelForSeq2SeqLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, T5ForConditionalGeneration)
    @slow
    def test_sequence_classification_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = TFAutoModelForSequenceClassification.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)
            model = AutoModelForSequenceClassification.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForSequenceClassification)
    @slow
    def test_question_answering_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)
            model = AutoModelForQuestionAnswering.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForQuestionAnswering)
    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
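# --- Illustrative sketch (an addition, not part of the original tests): the
# PT<->TF round trip these tests exercise, in one place. Requires torch,
# tensorflow, and network access to the Hub.
if __name__ == "__main__":
    tf_model = TFAutoModel.from_pretrained("bert-base-uncased", from_pt=True)  # PyTorch weights into a TF model
    pt_model = AutoModel.from_pretrained("bert-base-uncased", from_tf=True)  # TF weights into a PyTorch model
    print(type(tf_model).__name__, type(pt_model).__name__)  # TFBertModel BertModel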
| 486
| 1
|
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class TFBenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)
    def test_inference_no_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_no_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_with_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_with_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "patrickvonplaten/t5-tiny-random"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU")) == 0, "Cannot do xla on CPU.")
    def test_inference_no_configs_xla(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], use_xla=True, multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, save_to_csv=True, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"), inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"), env_info_csv_file=os.path.join(tmp_dir, "env.csv"), multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())
    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(tmp_dir, "log.txt"), log_print=True, trace_memory_line_by_line=True, eager_mode=True, multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
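# --- Illustrative sketch (an addition, not part of the original test file): the
# smallest end-to-end benchmark run, mirroring the arguments used above
# (downloads "sshleifer/tiny-gpt2" from the Hub).
if __name__ == "__main__":
    args = TensorFlowBenchmarkArguments(
        models=["sshleifer/tiny-gpt2"], inference=True, training=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
    )
    print(TensorFlowBenchmark(args).run())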
| 157
|
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()
    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)
    def test_save_load_local(self):
        self._test_save_load_local()
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
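# --- Illustrative sketch (an addition, not part of the original tests): the
# device-dependent seeding used in get_dummy_inputs above. MPS does not support
# a per-device torch.Generator, so a global manual_seed is used there instead.
def make_generator(device, seed=0):
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)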
| 157
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 704
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")
        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 241
| 0
|
'''simple docstring'''
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
lowerCAmelCase__ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    """Visual Question Answering pipeline: answers open-ended questions about images."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)
    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params
    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # Assume the inputs are already packed as {"image": ..., "question": ...} dicts
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results
    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs
    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs
    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 41
|
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)
    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config",
        default="facebook/mbart-large-cc25",
        type=str,
        help="Which huggingface architecture to use: mbart-large",
    )
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is mBART-50 checkpoint")
    parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
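# --- Illustrative CLI sketch (an addition; the script name and paths are placeholders):
#   python convert_mbart_checkpoint.py /path/to/model.pt ./mbart-hf \
#       --hf_config facebook/mbart-large-cc25 --finetuned
# The two positional arguments map to `fairseq_path` and `pytorch_dump_folder_path`
# above; passing `--mbart_50` as well switches the activation to relu for
# fine-tuned mBART-50 checkpoints, as handled in convert_fairseq_mbart_checkpoint_from_disk.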
| 41
| 1
|
"""simple docstring"""
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize) -> None:
        """
        Args:
            short_edge_length (list[min, max]): range to sample the target short edge from
            max_size (int): maximum allowed longest edge length
        """
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length
    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)
            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)
        return img_augs
class Preprocess:
    def __init__(self, cfg) -> None:
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std
    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im, [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]], value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]
        return torch.stack(images), torch.tensor(image_sizes)
    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes
def _clip_box(tensor, box_size):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
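# --- Illustrative usage sketch for _scale_box (an addition, not original code):
# boxes are (x0, y0, x1, y1); x coordinates are scaled by scale_yx[:, 1] (width)
# and y coordinates by scale_yx[:, 0] (height).
if __name__ == "__main__":
    boxes = torch.tensor([[10.0, 20.0, 110.0, 220.0]])
    scale_yx = torch.tensor([[0.5, 0.25]])  # (scale_y, scale_x)
    print(_scale_box(boxes, scale_yx))  # tensor([[  2.5000,  10.0000,  27.5000, 110.0000]])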
| 20
|
"""simple docstring"""
from __future__ import annotations
from typing import Generic, TypeVar
T = TypeVar("T")
class DisjointSetTreeNode(Generic[T]):
    # Disjoint Set Node to store the parent and rank
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0
class DisjointSetTree(Generic[T]):
    # Disjoint Set DataStructure
    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}
    def make_set(self, data: T) -> None:
        # create a new set with x as its member
        self.map[data] = DisjointSetTreeNode(data)
    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the set x belongs to (with path-compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent
    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # helper function for union operation
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1
    def union(self, data1: T, data2: T) -> None:
        # merge 2 disjoint sets
        self.link(self.find_set(data1), self.find_set(data2))
class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections: dict[T, dict[T, int]] = {}
    def add_node(self, node: T) -> None:
        # add a node ONLY if its not present in the graph
        if node not in self.connections:
            self.connections[node] = {}
    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an edge with the given weight
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
    def kruskal(self) -> GraphUndirectedWeighted[T]:
        # Kruskal's Algorithm to generate a Minimum Spanning Tree (MST) of the graph
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])
        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)
        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
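# --- Illustrative usage sketch (an addition, not part of the original module):
# Kruskal keeps the two cheapest edges and drops the weight-10 edge because its
# endpoints already share a disjoint set.
if __name__ == "__main__":
    g = GraphUndirectedWeighted[int]()
    g.add_edge(1, 2, 1)
    g.add_edge(2, 3, 2)
    g.add_edge(1, 3, 10)
    mst = g.kruskal()
    print(mst.connections)  # {1: {2: 1}, 2: {1: 1, 3: 2}, 3: {2: 2}}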
| 20
| 1
|
'''simple docstring'''
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
A_ = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples
def __UpperCamelCase ( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any]=None ):
if calib_dataset is None and self.calib_dataset is None:
raise ValueError("Trainer: calibration requires an calib_dataset." )
SCREAMING_SNAKE_CASE:Optional[int] = calib_dataset if calib_dataset is not None else self.calib_dataset
SCREAMING_SNAKE_CASE:List[Any] = self._remove_unused_columns(SCREAMING_SNAKE_CASE__ ,description="Calibration" )
return DataLoader(
SCREAMING_SNAKE_CASE__ ,batch_size=self.args.eval_batch_size ,collate_fn=self.data_collator ,drop_last=self.args.dataloader_drop_last ,num_workers=self.args.dataloader_num_workers ,pin_memory=self.args.dataloader_pin_memory ,shuffle=SCREAMING_SNAKE_CASE__ ,)
def __UpperCamelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=None ):
SCREAMING_SNAKE_CASE:List[Any] = self.train_dataset if calib_dataset is None else calib_dataset
SCREAMING_SNAKE_CASE:Optional[Any] = self.get_calib_dataloader(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Dict = self.model
quant_trainer.configure_model(SCREAMING_SNAKE_CASE__ ,self.quant_trainer_args ,calib=SCREAMING_SNAKE_CASE__ )
model.eval()
quant_trainer.enable_calibration(SCREAMING_SNAKE_CASE__ )
logger.info("***** Running calibration *****" )
logger.info(F''' Num examples = {self.calib_num}''' )
logger.info(F''' Batch size = {calib_dataloader.batch_size}''' )
for step, inputs in enumerate(SCREAMING_SNAKE_CASE__ ):
# Prediction step
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:Union[str, Any] = self.prediction_step(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,prediction_loss_only=SCREAMING_SNAKE_CASE__ )
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(SCREAMING_SNAKE_CASE__ ,self.quant_trainer_args )
SCREAMING_SNAKE_CASE:Optional[Any] = model
def __UpperCamelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : List[Any]=None ,SCREAMING_SNAKE_CASE__ : Optional[Any]=None ,SCREAMING_SNAKE_CASE__ : Optional[Any]=None ,SCREAMING_SNAKE_CASE__ : str = "eval" ):
SCREAMING_SNAKE_CASE:List[Any] = self.eval_dataset if eval_dataset is None else eval_dataset
SCREAMING_SNAKE_CASE:Optional[int] = self.get_eval_dataloader(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Any = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
SCREAMING_SNAKE_CASE:str = self.compute_metrics
SCREAMING_SNAKE_CASE:Union[str, Any] = None
SCREAMING_SNAKE_CASE:int = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
SCREAMING_SNAKE_CASE:str = eval_loop(
SCREAMING_SNAKE_CASE__ ,description="Evaluation" ,prediction_loss_only=True if compute_metrics is None else None ,ignore_keys=SCREAMING_SNAKE_CASE__ ,)
finally:
SCREAMING_SNAKE_CASE:Dict = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
SCREAMING_SNAKE_CASE:Optional[Any] = self.post_process_function(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,output.predictions )
SCREAMING_SNAKE_CASE:List[Any] = self.compute_metrics(SCREAMING_SNAKE_CASE__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
SCREAMING_SNAKE_CASE:Union[str, Any] = metrics.pop(SCREAMING_SNAKE_CASE__ )
self.log(SCREAMING_SNAKE_CASE__ )
else:
SCREAMING_SNAKE_CASE:int = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
SCREAMING_SNAKE_CASE:int = self.callback_handler.on_evaluate(self.args ,self.state ,self.control ,SCREAMING_SNAKE_CASE__ )
return metrics
def __UpperCamelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : List[str]=None ,SCREAMING_SNAKE_CASE__ : str = "test" ):
SCREAMING_SNAKE_CASE:List[str] = self.get_test_dataloader(SCREAMING_SNAKE_CASE__ )
# Temporarily disable metric computation, we will do it in the loop here.
SCREAMING_SNAKE_CASE:List[str] = self.compute_metrics
SCREAMING_SNAKE_CASE:int = None
SCREAMING_SNAKE_CASE:Union[str, Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
SCREAMING_SNAKE_CASE:Optional[int] = eval_loop(
SCREAMING_SNAKE_CASE__ ,description="Prediction" ,prediction_loss_only=True if compute_metrics is None else None ,ignore_keys=SCREAMING_SNAKE_CASE__ ,)
finally:
SCREAMING_SNAKE_CASE:Dict = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
SCREAMING_SNAKE_CASE:Optional[Any] = self.post_process_function(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,output.predictions ,"predict" )
SCREAMING_SNAKE_CASE:Dict = self.compute_metrics(SCREAMING_SNAKE_CASE__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
SCREAMING_SNAKE_CASE:int = metrics.pop(SCREAMING_SNAKE_CASE__ )
return PredictionOutput(predictions=predictions.predictions ,label_ids=predictions.label_ids ,metrics=SCREAMING_SNAKE_CASE__ )
def __UpperCamelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : List[Any]="./" ):
SCREAMING_SNAKE_CASE:str = self.eval_dataset
SCREAMING_SNAKE_CASE:Optional[int] = self.get_eval_dataloader(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Any = next(iter(SCREAMING_SNAKE_CASE__ ) )
# saving device - to make it consistent
SCREAMING_SNAKE_CASE:List[Any] = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
# convert to tuple
SCREAMING_SNAKE_CASE:int = tuple(v.to(SCREAMING_SNAKE_CASE__ ) for k, v in batch.items() )
logger.info("Converting model to be onnx compatible" )
from pytorch_quantization.nn import TensorQuantizer
SCREAMING_SNAKE_CASE:str = True
SCREAMING_SNAKE_CASE:Optional[Any] = self.model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
model.float()
SCREAMING_SNAKE_CASE:Any = model.module if hasattr(SCREAMING_SNAKE_CASE__ ,"module" ) else model
quant_trainer.configure_model(SCREAMING_SNAKE_CASE__ ,self.quant_trainer_args )
SCREAMING_SNAKE_CASE:Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE__ ,"model.onnx" )
logger.info(F'''exporting model to {output_model_file}''' )
        axes = {0: "batch_size", 1: "seq_len"}
torch.onnx.export(
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,export_params=SCREAMING_SNAKE_CASE__ ,opset_version=13 ,do_constant_folding=SCREAMING_SNAKE_CASE__ ,input_names=["input_ids", "attention_mask", "token_type_ids"] ,output_names=["output_start_logits", "output_end_logits"] ,dynamic_axes={
"input_ids": axes,
"attention_mask": axes,
"token_type_ids": axes,
"output_start_logits": axes,
"output_end_logits": axes,
} ,verbose=SCREAMING_SNAKE_CASE__ ,)
logger.info("onnx export finished" )
| 143
|
'''simple docstring'''
def solution(power: int = 1000) -> int:
    """Return the sum of the digits of the number 2**power."""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i)
    return sum_of_num
if __name__ == "__main__":
    power = int(input("Enter the power of 2: ").strip())
    print("2 ^ ", power, " = ", 2**power)
    result = solution(power)
    print("Sum of the digits is: ", result)
| 143
| 1
|
"""simple docstring"""
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [[word, *way] for way in table[i]]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
if __name__ == "__main__":
print(all_construct('jwajalapa', ['jwa', 'j', 'w', 'a', 'la', 'lapa']))
print(all_construct('rajamati', ['s', 'raj', 'amat', 'raja', 'ma', 'i', 't']))
print(
all_construct(
'hexagonosaurus',
['h', 'ex', 'hex', 'ag', 'ago', 'ru', 'auru', 'rus', 'go', 'no', 'o', 's'],
)
)
| 254
|
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
def __snake_case ( self ):
UpperCAmelCase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : Tuple = None
if self.use_labels:
UpperCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase__ : List[str] = self.get_config()
return config, pixel_values, labels
def __snake_case ( self ):
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
UpperCAmelCase__ : Optional[Any] = ConvNextModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCAmelCase__ : int = model(UpperCamelCase_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
UpperCAmelCase__ : str = ConvNextForImageClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCAmelCase__ : Tuple = model(UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
UpperCAmelCase__ : List[str] = ConvNextBackbone(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCAmelCase__ : Optional[int] = model(UpperCamelCase_ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
UpperCAmelCase__ : List[Any] = None
UpperCAmelCase__ : Dict = ConvNextBackbone(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCAmelCase__ : Optional[Any] = model(UpperCamelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __snake_case ( self ):
UpperCAmelCase__ : Optional[int] = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : int = config_and_inputs
UpperCAmelCase__ : List[str] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
UpperCamelCase : Optional[Any] = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
UpperCamelCase : Optional[int] = (
{"""feature-extraction""": ConvNextModel, """image-classification""": ConvNextForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase : str = True
UpperCamelCase : Union[str, Any] = False
UpperCamelCase : Any = False
UpperCamelCase : Union[str, Any] = False
UpperCamelCase : Optional[Any] = False
def __snake_case ( self ):
UpperCAmelCase__ : str = ConvNextModelTester(self )
UpperCAmelCase__ : Any = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ , hidden_size=37 )
def __snake_case ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __snake_case ( self ):
return
@unittest.skip(reason='ConvNext does not use inputs_embeds' )
def __snake_case ( self ):
pass
@unittest.skip(reason='ConvNext does not support input and output embeddings' )
def __snake_case ( self ):
pass
@unittest.skip(reason='ConvNext does not use feedforward chunking' )
def __snake_case ( self ):
pass
def __snake_case ( self ):
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : str = model_class(UpperCamelCase_ )
UpperCAmelCase__ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : List[str] = [*signature.parameters.keys()]
UpperCAmelCase__ : Union[str, Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCamelCase_ )
def __snake_case ( self ):
UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def __snake_case ( self ):
UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCamelCase_ )
def __snake_case ( self ):
def check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
UpperCAmelCase__ : List[Any] = model_class(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase__ : Optional[Any] = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
UpperCAmelCase__ : Union[str, Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase__ : Optional[Any] = self.model_tester.num_stages
self.assertEqual(len(UpperCamelCase_ ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : List[Any] = True
check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ : str = True
check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def __snake_case ( self ):
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_ )
@slow
def __snake_case ( self ):
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : List[Any] = ConvNextModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
@cached_property
def __snake_case ( self ):
return AutoImageProcessor.from_pretrained('facebook/convnext-tiny-224' ) if is_vision_available() else None
@slow
def __snake_case ( self ):
UpperCAmelCase__ : Optional[Any] = ConvNextForImageClassification.from_pretrained('facebook/convnext-tiny-224' ).to(UpperCamelCase_ )
UpperCAmelCase__ : str = self.default_image_processor
UpperCAmelCase__ : List[Any] = prepare_img()
UpperCAmelCase__ : str = image_processor(images=UpperCamelCase_ , return_tensors='pt' ).to(UpperCamelCase_ )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : Optional[Any] = model(**UpperCamelCase_ )
# verify the logits
UpperCAmelCase__ : Any = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase_ )
UpperCAmelCase__ : Optional[int] = torch.tensor([-0.0260, -0.4739, 0.1911] ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase_ , atol=1E-4 ) )
@require_torch
class ConvNextBackboneTest(BackboneTesterMixin, unittest.TestCase):
UpperCamelCase : str = (ConvNextBackbone,) if is_torch_available() else ()
UpperCamelCase : List[str] = ConvNextConfig
UpperCamelCase : Tuple = False
def __snake_case ( self ):
UpperCAmelCase__ : List[str] = ConvNextModelTester(self )
| 254
| 1
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['''transformers''', '''torch''', '''note_seq'''])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['''transformers''', '''torch''', '''note_seq'''])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['''transformers''', '''torch''', '''note_seq'''])
| 347
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")

    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)


if __name__ == "__main__":
    main()
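# Launch sketch: the checks above only exercise cross-process behaviour when the
# script is started through the accelerate CLI, e.g.
#
#   accelerate launch --num_processes 2 path/to/this_script.py
#
# With two processes, create_tensor yields [1., 2.] on rank 0 and [3., 4.] on
# rank 1, so gather sees [1., 2., 3., 4.] and reduce(..., "sum") sees [4., 6.].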
| 454
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_falcon"] = [
'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST',
'FalconForCausalLM',
'FalconModel',
'FalconPreTrainedModel',
'FalconForSequenceClassification',
'FalconForTokenClassification',
'FalconForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
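# Lazy-import sketch: because of the _LazyModule above, importing FalconConfig
# does not pull in torch; the heavy modeling module is only imported when one
# of its names (e.g. FalconModel) is first accessed.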
| 709
|
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping between utf-8 byte values and unicode strings, skipping
    whitespace/control characters that byte-level BPE cannot handle.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ):
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
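# Usage sketch (weights fetched from the Hub, network access assumed): the
# `_pad` override above keeps `global_attention_mask` aligned with `input_ids`,
# filling the tail with -1 rather than 0 because 0 already means "local
# attention" for LED:
#
#   tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")
#   enc = tokenizer("Hello world")
#   enc["global_attention_mask"] = [1] + [0] * (len(enc["input_ids"]) - 1)
#   padded = tokenizer.pad(enc, padding="max_length", max_length=16)
#   assert padded["global_attention_mask"][-1] == -1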
| 250
| 0
|
import inspect
import unittest


class DependencyTester(unittest.TestCase):
    def test_diffusers_import(self):
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"{backend} is not in the deps table!"
| 105
|
def naive_cut_rod_recursive(n: int, prices: list):
    """Exponential-time recursive solution to the rod-cutting problem."""
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float("-inf")
    for i in range(1, n + 1):
        max_revenue = max(
            max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )

    return max_revenue


def top_down_cut_rod(n: int, prices: list):
    """Memoized (top-down dynamic programming) solution."""
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )

        max_rev[n] = max_revenue

    return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    """Iterative (bottom-up dynamic programming) solution."""
    _enforce_args(n, prices)

    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0

    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])

        max_rev[i] = max_revenue_i

    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)

    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)

    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36

    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
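    # Quick extra check with illustrative prices (values chosen here, not from
    # the original file): the optimal cut of a length-4 rod with piece prices
    # [1, 5, 8, 9] is two length-2 pieces, worth 5 + 5 = 10.
    assert bottom_up_cut_rod(4, [1, 5, 8, 9]) == 10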
| 658
| 0
|
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2
import numpy as np

# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250


def main() -> None:
    """
    Get images list and annotations list from the input dirs, build mosaic
    images and annotations, and save them to the output dir.
    """
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )

        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index + 1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str):
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2

            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(
    all_img_list: list,
    all_annos: list,
    idxs: list,
    output_size: tuple,
    scale_range: tuple,
    filter_scale: float = 0.0,
):
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])

    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y)
            )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding boxes smaller than the filter scale
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]


def random_chars(number_char: int) -> str:
    """Generate a random string of lowercase letters and digits."""
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
| 428
|
from __future__ import annotations


def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Find all board positions a knight can reach from the current position."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []

    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)

    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """Check whether every square of the board has been visited."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Recursive backtracking helper for the open knight's tour."""
    if is_complete(board):
        return True

    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0

    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Find an open knight's tour on an n x n board, or raise ValueError."""
    board = [[0 for i in range(n)] for j in range(n)]

    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0

    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
if __name__ == "__main__":
import doctest
doctest.testmod()
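    # Tiny sanity check: on a 1x1 board the "tour" is the single starting square.
    assert open_knight_tour(1) == [[1]]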
| 428
| 1
|
"""simple docstring"""
from __future__ import annotations
import numpy as np
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ ) ->tuple[np.ndarray, np.ndarray]:
_lowerCamelCase, _lowerCamelCase : List[str] = np.shape(SCREAMING_SNAKE_CASE_ )
if rows != columns:
_lowerCamelCase : int = (
'''\'table\' has to be of square shaped array but got a '''
F'''{rows}x{columns} array:\n{table}'''
)
raise ValueError(SCREAMING_SNAKE_CASE_ )
_lowerCamelCase : List[str] = np.zeros((rows, columns) )
_lowerCamelCase : str = np.zeros((rows, columns) )
for i in range(SCREAMING_SNAKE_CASE_ ):
for j in range(SCREAMING_SNAKE_CASE_ ):
_lowerCamelCase : Optional[int] = sum(lower[i][k] * upper[k][j] for k in range(SCREAMING_SNAKE_CASE_ ) )
if upper[j][j] == 0:
raise ArithmeticError('''No LU decomposition exists''' )
_lowerCamelCase : Optional[int] = (table[i][j] - total) / upper[j][j]
_lowerCamelCase : Any = 1
for j in range(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
_lowerCamelCase : Any = sum(lower[i][k] * upper[k][j] for k in range(SCREAMING_SNAKE_CASE_ ) )
_lowerCamelCase : Dict = table[i][j] - total
return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
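    # Quick numeric check (matrix chosen here for illustration): the two
    # factors returned by Doolittle's method should multiply back to the input.
    demo = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
    lower, upper = lower_upper_decomposition(demo)
    assert np.allclose(lower @ upper, demo)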
| 434
|
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->np.array:
_lowerCamelCase : Tuple = int(np.ceil((x_end - xa) / step_size ) )
_lowerCamelCase : List[str] = np.zeros((n + 1,) )
_lowerCamelCase : Optional[int] = ya
_lowerCamelCase : List[str] = xa
for k in range(SCREAMING_SNAKE_CASE_ ):
_lowerCamelCase : str = y[k] + step_size * ode_func(SCREAMING_SNAKE_CASE_ , y[k] )
_lowerCamelCase : int = y[k] + (
(step_size / 2) * (ode_func(SCREAMING_SNAKE_CASE_ , y[k] ) + ode_func(x + step_size , SCREAMING_SNAKE_CASE_ ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
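    # Illustrative run (values chosen here, not from the original file):
    # integrate y' = y from x = 0 to x = 1 with y(0) = 1; the last entry
    # approaches e as step_size shrinks.
    approximation = euler_modified(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
    print(f"y(1) ≈ {approximation[-1]:.5f} (exact: 2.71828...)")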
| 434
| 1
|
"""simple docstring"""
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
"tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
"CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
"CpmAntForCausalLM",
"CpmAntModel",
"CpmAntPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 645
|
"""simple docstring"""
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    """A callback that registers the events that go through."""

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")


@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # its set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)
        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset, callbacks=callbacks)

    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)

    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events

    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
| 645
| 1
|
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
from .vae_flax import FlaxAutoencoderKL
| 445
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/wavlm-base''': '''https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json''',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class WavLMConfig(PretrainedConfig):
    model_type = "wavlm"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        num_buckets=320,
        max_bucket_distance=800,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
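# Usage sketch: with the default conv_stride of (5, 2, 2, 2, 2, 2, 2) the
# feature encoder downsamples 16 kHz audio by 5 * 2**6 = 320, which is what
# the property above reports:
#
#   from transformers import WavLMConfig
#   assert WavLMConfig().inputs_to_logits_ratio == 320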
| 445
| 1
|
"""simple docstring"""
def __magic_name__ ( __snake_case : int = 1000 ) -> int:
lowercase , lowercase : Optional[Any] = 1, 1
lowercase : int = []
for i in range(1 , n + 1 ):
lowercase : List[str] = prev_numerator + 2 * prev_denominator
lowercase : int = prev_numerator + prev_denominator
if len(str(__snake_case ) ) > len(str(__snake_case ) ):
result.append(__snake_case )
lowercase : Tuple = numerator
lowercase : List[Any] = denominator
return len(__snake_case )
if __name__ == "__main__":
print(F"{solution() = }")
| 518
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""alibaba-damo/mgp-str-base""": """https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json""",
}
class MgpstrConfig(PretrainedConfig):
    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3a_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3a_attentions = output_a3a_attentions
        self.initializer_range = initializer_range
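# Usage sketch: the defaults above mirror the "alibaba-damo/mgp-str-base"
# checkpoint, so a bare instantiation is enough to inspect the layout, e.g.
#
#   from transformers import MgpstrConfig
#   config = MgpstrConfig()
#   assert config.max_token_length == 27 and config.hidden_size == 768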
| 518
| 1
|
from __future__ import annotations
import numpy as np
def relu(vector: list[float]) -> np.ndarray:
    """Element-wise rectified linear unit: max(0, x)."""
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 1
|
"""simple docstring"""
def UpperCAmelCase_ ( __a : float , __a : float ):
'''simple docstring'''
if density <= 0:
raise ValueError('Impossible fluid density' )
if bulk_modulus <= 0:
raise ValueError('Impossible bulk modulus' )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
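    # Worked example (illustrative values): water at roughly 998 kg/m^3 with a
    # bulk modulus of 2.15e9 Pa gives a sound speed of about 1468 m/s.
    print(f"{speed_of_sound_in_a_fluid(998, 2.15e9):.0f} m/s")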
| 437
| 0
|
import warnings

from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor


logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
|
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import LevitImageProcessor


class LevitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }


@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 9
| 0
|
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='%(message)s')
def a_ ( lowerCAmelCase_ : np.ndarray ):
return input_array.reshape((input_array.size, 1) )
def a_ ( lowerCAmelCase_ : np.ndarray, lowerCAmelCase_ : np.ndarray, lowerCAmelCase_ : int ):
__lowerCAmelCase = np.nan
for i in range(lowerCAmelCase_ ):
__lowerCAmelCase = features[:, labels == i]
__lowerCAmelCase = data.mean(1 )
# Centralize the data of class i
__lowerCAmelCase = data - column_reshape(lowerCAmelCase_ )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(lowerCAmelCase_, centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
__lowerCAmelCase = np.dot(lowerCAmelCase_, centered_data.T )
return covariance_sum / features.shape[1]
def a_ ( lowerCAmelCase_ : np.ndarray, lowerCAmelCase_ : np.ndarray, lowerCAmelCase_ : int ):
__lowerCAmelCase = features.mean(1 )
__lowerCAmelCase = np.nan
for i in range(lowerCAmelCase_ ):
__lowerCAmelCase = features[:, labels == i]
__lowerCAmelCase = data.shape[1]
__lowerCAmelCase = data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(lowerCAmelCase_ ) - column_reshape(lowerCAmelCase_ ), (column_reshape(lowerCAmelCase_ ) - column_reshape(lowerCAmelCase_ )).T, )
else:
# If covariance_sum is np.nan (i.e. first loop)
__lowerCAmelCase = device_data * np.dot(
column_reshape(lowerCAmelCase_ ) - column_reshape(lowerCAmelCase_ ), (column_reshape(lowerCAmelCase_ ) - column_reshape(lowerCAmelCase_ )).T, )
return covariance_sum / features.shape[1]
def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    """Project the dataset onto its first `dimensions` principal components."""
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then take only the first `dimensions` columns
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the dataset on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info('Principal Component Analysis computed')
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format='%(message)s', force=True)
        logging.error('Dataset empty')
        raise AssertionError


def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    """Project the dataset onto the `dimensions` most discriminant directions."""
    # The number of dimensions kept must be smaller than the number of classes
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info('Linear Discriminant Analysis computed')
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format='%(message)s', force=True)
        logging.error('Dataset empty')
        raise AssertionError
def test_linear_discriminant_analysis() -> None:
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2
    # Assert that the function raises an AssertionError if dimensions >= classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                'Did not raise AssertionError for dimensions > classes')
    assert error_info.type is AssertionError


def test_principal_component_analysis() -> None:
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])
    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
    assert error_info.type is AssertionError
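# Illustrative usage sketch (not part of the original test suite; the dataset
# below is made up): reduce a 3-feature, 2-class dataset to one dimension with
# both techniques defined above.
def demo_dimensionality_reduction() -> None:
    features = np.array(
        [
            [1.0, 2.0, 3.0, 5.0, 6.0, 7.0],
            [2.0, 1.0, 4.0, 6.0, 7.0, 5.0],
            [0.5, 1.5, 1.0, 3.0, 2.0, 4.0],
        ]
    )
    labels = np.array([0, 0, 0, 1, 1, 1])
    pca_projection = principal_component_analysis(features, dimensions=1)
    lda_projection = linear_discriminant_analysis(features, labels, classes=2, dimensions=1)
    logging.info('PCA projection shape: %s', pca_projection.shape)
    logging.info('LDA projection shape: %s', lda_projection.shape)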
if __name__ == "__main__":
import doctest
doctest.testmod()
| 53
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
},
'tokenizer_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
SPIECE_UNDERLINE = '▁'

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
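# Usage sketch (illustrative; assumes the tokenizers backend and access to the
# "xlnet-base-cased" checkpoint referenced in the maps above):
# tokenizer = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
# input_ids = tokenizer("Hello world")["input_ids"]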
| 524
| 0
|
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""TsinghuaAI/CPM-Generate""": """https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model""",
}
}
class CpmTokenizer(PreTrainedTokenizer):
    """Runs jieba pre-tokenization and SentencePiece tokenization."""

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")

    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs: str) -> str:
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
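# Usage sketch (illustrative; "spiece.model" is a hypothetical local path, and
# the sentencepiece and jieba packages must be installed):
# tokenizer = CpmTokenizer(vocab_file="spiece.model")
# ids = tokenizer.encode("some text")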
| 713
|
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }
        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
| 150
| 0
|
"""simple docstring"""
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    """Decrypt a Caesar cipher by minimising the chi-squared statistic."""
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
"a": 0.08497,
"b": 0.01492,
"c": 0.02202,
"d": 0.04253,
"e": 0.11162,
"f": 0.02228,
"g": 0.02015,
"h": 0.06094,
"i": 0.07546,
"j": 0.00153,
"k": 0.01292,
"l": 0.04025,
"m": 0.02406,
"n": 0.06749,
"o": 0.07507,
"p": 0.01929,
"q": 0.00095,
"r": 0.07587,
"s": 0.06327,
"t": 0.09356,
"u": 0.02758,
"v": 0.00978,
"w": 0.02560,
"x": 0.00150,
"y": 0.01994,
"z": 0.00077,
}
else:
# Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter.lower()] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )
# Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
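# Usage sketch (illustrative): brute-force all 26 shifts and keep the one whose
# letter frequencies best match English. "ifmmp xpsme" is "hello world" shifted
# by one, so the decoder should recover shift == 1:
# shift, chi_squared_value, decoded = decrypt_caesar_with_chi_squared("ifmmp xpsme")
# print(shift, decoded)  # expected: 1 hello world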
| 82
|
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r'#.*', '', line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = '\n'.join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode('utf-8')
    return sha256(full_bytes).hexdigest()
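# Example (illustrative): comments are stripped before hashing, so these agree:
# _hash_python_lines(["x = 1  # a note"]) == _hash_python_lines(["x = 1  "])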
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"""csv""": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"""json""": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"""pandas""": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"""parquet""": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"""arrow""": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"""text""": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"""imagefolder""": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"""audiofolder""": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
""".csv""": ("""csv""", {}),
""".tsv""": ("""csv""", {"""sep""": """\t"""}),
""".json""": ("""json""", {}),
""".jsonl""": ("""json""", {}),
""".parquet""": ("""parquet""", {}),
""".arrow""": ("""arrow""", {}),
""".txt""": ("""text""", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(""".zip""")
_MODULE_TO_EXTENSIONS["audiofolder"].append(""".zip""")
| 559
| 0
|
def heaps(arr: list) -> list:
    """Return all permutations of `arr`, computed with Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return
        generate(k - 1, arr)
        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res
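# Example (illustrative): heaps([1, 2, 3]) returns all 6 permutations as tuples,
# ordered by Heap's swap sequence and starting from the input order (1, 2, 3).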
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
print(heaps(arr))
| 180
|
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SCREAMING_SNAKE_CASE__ : List[str] = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"""text-classification""",
"""language-modeling""",
"""summarization""",
"""token-classification""",
"""question-answering""",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
    import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
SCREAMING_SNAKE_CASE__ : int = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument('-f')
    args = parser.parse_args()
    return args.f
def __lowercase ( snake_case, snake_case="eval" ):
"""simple docstring"""
__magic_name__ :str = os.path.join(snake_case, f'''{split}_results.json''' )
if os.path.exists(snake_case ):
with open(snake_case, '''r''' ) as f:
return json.load(snake_case )
raise ValueError(f'''can\'t find {path}''' )
SCREAMING_SNAKE_CASE__ : Tuple = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTests(TestCasePlus):
    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
        with patch.object(sys, 'argv', testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result['eval_accuracy'], 0.75)
@slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
        with patch.object(sys, 'argv', testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result['eval_perplexity'], 100)
@slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
'''.split()
        with patch.object(sys, 'argv', testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split='test')
            self.assertGreaterEqual(result['test_rouge1'], 10)
            self.assertGreaterEqual(result['test_rouge2'], 2)
            self.assertGreaterEqual(result['test_rougeL'], 7)
            self.assertGreaterEqual(result['test_rougeLsum'], 7)
@slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
'''.split()
        with patch.object(sys, 'argv', testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result['eval_perplexity'], 42)
@slow
    def test_run_t5_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
        with patch.object(sys, 'argv', testargs):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result['eval_accuracy'], 0.42)
@slow
    def test_run_ner(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
'''.split()
        with patch.object(sys, 'argv', testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result['eval_accuracy'], 0.75)
            self.assertGreaterEqual(result['eval_f1'], 0.3)
@slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
'''.split()
        with patch.object(sys, 'argv', testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result['eval_f1'], 30)
            self.assertGreaterEqual(result['eval_exact'], 30)
| 180
| 1
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class snake_case__ ( metaclass=a_):
a_ = ['''flax''']
def __init__( self : List[Any] , *_A : Tuple , **_A : Optional[int] ) -> str:
requires_backends(self , ['''flax'''] )
@classmethod
def A ( cls : int , *_A : List[Any] , **_A : str ) -> Optional[int]:
requires_backends(cls , ['''flax'''] )
@classmethod
def A ( cls : List[str] , *_A : Optional[int] , **_A : str ) -> List[Any]:
requires_backends(cls , ['''flax'''] )
class snake_case__ ( metaclass=a_):
a_ = ['''flax''']
def __init__( self : List[str] , *_A : Optional[Any] , **_A : Any ) -> Optional[int]:
requires_backends(self , ['''flax'''] )
@classmethod
def A ( cls : Any , *_A : Optional[Any] , **_A : Union[str, Any] ) -> Any:
requires_backends(cls , ['''flax'''] )
@classmethod
def A ( cls : Optional[int] , *_A : Optional[Any] , **_A : str ) -> str:
requires_backends(cls , ['''flax'''] )
class snake_case__ ( metaclass=a_):
a_ = ['''flax''']
def __init__( self : List[str] , *_A : List[Any] , **_A : List[str] ) -> str:
requires_backends(self , ['''flax'''] )
@classmethod
def A ( cls : Optional[Any] , *_A : List[str] , **_A : Tuple ) -> Tuple:
requires_backends(cls , ['''flax'''] )
@classmethod
def A ( cls : List[Any] , *_A : List[str] , **_A : List[str] ) -> List[Any]:
requires_backends(cls , ['''flax'''] )
class snake_case__ ( metaclass=a_):
a_ = ['''flax''']
def __init__( self : Optional[int] , *_A : int , **_A : List[str] ) -> Dict:
requires_backends(self , ['''flax'''] )
@classmethod
def A ( cls : Tuple , *_A : Optional[Any] , **_A : str ) -> List[str]:
requires_backends(cls , ['''flax'''] )
@classmethod
def A ( cls : Tuple , *_A : Optional[int] , **_A : Optional[int] ) -> str:
requires_backends(cls , ['''flax'''] )
class snake_case__ ( metaclass=a_):
a_ = ['''flax''']
def __init__( self : Tuple , *_A : Optional[Any] , **_A : Dict ) -> int:
requires_backends(self , ['''flax'''] )
@classmethod
def A ( cls : List[str] , *_A : List[str] , **_A : Optional[Any] ) -> Tuple:
requires_backends(cls , ['''flax'''] )
@classmethod
def A ( cls : Optional[int] , *_A : int , **_A : Optional[Any] ) -> int:
requires_backends(cls , ['''flax'''] )
class snake_case__ ( metaclass=a_):
a_ = ['''flax''']
def __init__( self : Optional[int] , *_A : Optional[int] , **_A : Tuple ) -> List[Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def A ( cls : List[Any] , *_A : List[Any] , **_A : Any ) -> List[str]:
requires_backends(cls , ['''flax'''] )
@classmethod
def A ( cls : str , *_A : Dict , **_A : List[Any] ) -> Union[str, Any]:
requires_backends(cls , ['''flax'''] )
class snake_case__ ( metaclass=a_):
a_ = ['''flax''']
def __init__( self : int , *_A : Union[str, Any] , **_A : Union[str, Any] ) -> Union[str, Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def A ( cls : int , *_A : Optional[int] , **_A : str ) -> List[str]:
requires_backends(cls , ['''flax'''] )
@classmethod
def A ( cls : Optional[int] , *_A : Any , **_A : List[str] ) -> List[str]:
requires_backends(cls , ['''flax'''] )
class snake_case__ ( metaclass=a_):
a_ = ['''flax''']
def __init__( self : Optional[Any] , *_A : List[Any] , **_A : Optional[int] ) -> Dict:
requires_backends(self , ['''flax'''] )
@classmethod
def A ( cls : int , *_A : List[Any] , **_A : Union[str, Any] ) -> Optional[int]:
requires_backends(cls , ['''flax'''] )
@classmethod
def A ( cls : List[str] , *_A : Dict , **_A : List[Any] ) -> str:
requires_backends(cls , ['''flax'''] )
class snake_case__ ( metaclass=a_):
a_ = ['''flax''']
def __init__( self : Tuple , *_A : Dict , **_A : Dict ) -> Dict:
requires_backends(self , ['''flax'''] )
@classmethod
def A ( cls : Dict , *_A : Tuple , **_A : Optional[int] ) -> Tuple:
requires_backends(cls , ['''flax'''] )
@classmethod
def A ( cls : Optional[int] , *_A : Dict , **_A : List[Any] ) -> Tuple:
requires_backends(cls , ['''flax'''] )
class snake_case__ ( metaclass=a_):
a_ = ['''flax''']
def __init__( self : List[Any] , *_A : str , **_A : Optional[int] ) -> Tuple:
requires_backends(self , ['''flax'''] )
@classmethod
def A ( cls : List[Any] , *_A : Tuple , **_A : Any ) -> str:
requires_backends(cls , ['''flax'''] )
@classmethod
def A ( cls : Union[str, Any] , *_A : Union[str, Any] , **_A : List[Any] ) -> Tuple:
requires_backends(cls , ['''flax'''] )
class snake_case__ ( metaclass=a_):
a_ = ['''flax''']
def __init__( self : Tuple , *_A : Dict , **_A : Dict ) -> Optional[int]:
requires_backends(self , ['''flax'''] )
@classmethod
def A ( cls : Dict , *_A : Tuple , **_A : Union[str, Any] ) -> List[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def A ( cls : List[str] , *_A : Union[str, Any] , **_A : Union[str, Any] ) -> Optional[Any]:
requires_backends(cls , ['''flax'''] )
class snake_case__ ( metaclass=a_):
a_ = ['''flax''']
def __init__( self : Union[str, Any] , *_A : Any , **_A : Optional[int] ) -> List[str]:
requires_backends(self , ['''flax'''] )
@classmethod
def A ( cls : int , *_A : List[Any] , **_A : List[Any] ) -> Optional[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def A ( cls : Optional[int] , *_A : Union[str, Any] , **_A : str ) -> Optional[Any]:
requires_backends(cls , ['''flax'''] )
class snake_case__ ( metaclass=a_):
a_ = ['''flax''']
def __init__( self : List[str] , *_A : int , **_A : Union[str, Any] ) -> str:
requires_backends(self , ['''flax'''] )
@classmethod
def A ( cls : List[str] , *_A : int , **_A : List[str] ) -> Tuple:
requires_backends(cls , ['''flax'''] )
@classmethod
def A ( cls : List[Any] , *_A : Any , **_A : Optional[int] ) -> Any:
requires_backends(cls , ['''flax'''] )
| 541
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
'''facebook/xlm-roberta-xl''': '''https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json''',
'''facebook/xlm-roberta-xxl''': '''https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json''',
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig(PretrainedConfig):
    model_type = 'xlm-roberta-xl'

    def __init__(
        self,
        vocab_size=250_880,
        hidden_size=2_560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10_240,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1E-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type='absolute',
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ]
        )
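# Usage sketch: build a default configuration; the values mirror the
# facebook/xlm-roberta-xl checkpoint referenced above.
# config = XLMRobertaXLConfig()
# config.hidden_size  # 2560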
| 186
| 0
|
def triangle_number_generator():
    """Generate the triangle numbers 1, 3, 6, 10, ..."""
    for n in range(1, 1000000):
        yield n * (n + 1) // 2


def count_divisors(n):
    """Count the divisors of n from its prime factorization."""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution():
    """Return the first triangle number with more than 500 divisors."""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)
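# Worked example: 28 = 2**2 * 7, so it has (2 + 1) * (1 + 1) = 6 divisors
# (1, 2, 4, 7, 14, 28) and count_divisors(28) == 6.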
if __name__ == "__main__":
print(solution())
| 488
|
def different_signs(num1: int, num2: int) -> bool:
    """Return True if num1 and num2 have opposite signs, using XOR.

    >>> different_signs(1, -1)
    True
    >>> different_signs(1, 1)
    False
    >>> different_signs(-1, -1)
    False
    """
    return (num1 ^ num2) < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 488
| 1
|
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__UpperCamelCase : int = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'
__UpperCamelCase : Dict = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'
__UpperCamelCase : Tuple = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... 
\'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Sequence(datasets.Value('string', id='token'), id='sequence'),
                    'references': datasets.Sequence(
                        datasets.Sequence(datasets.Value('string', id='token'), id='sequence'), id='references'
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[List[str]]],
        references: List[List[str]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
| 248
|
def solution(limit: int = 1_000_000) -> int:
    """Sum Euler's totient function phi(n) for 2 <= n <= limit using a sieve."""
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))
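# Worked example: for limit=9 the sieve yields phi(2..9) = 1, 2, 2, 4, 2, 6, 4, 6,
# so solution(9) returns 27.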
if __name__ == "__main__":
print(f"""{solution() = }""")
| 248
| 1
|
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples
    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)
            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )
@require_torch
    def test_small_model_pt(self):
        small_model = """hf-internal-testing/tiny-random-VideoMAEForVideoClassification"""
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )
        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )
    @require_tf
    def test_small_model_tf(self):
        pass
| 481
|
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make masks reproducible
        np.random.seed(2)
        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise
        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)

    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1E-5)
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
    def test_determinism(self):
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
    def test_save_load_fast_init_from_base(self):
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
    def test_save_load_fast_init_to_base(self):
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
    def test_model_outputs_equivalence(self):
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def test_model_is_small(self):
pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None
    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)
        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))
        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]])
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
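# A quick arithmetic sketch (added for clarity, not part of the original test) of
# where the (1, 196, 768) logits shape above comes from, assuming the base
# checkpoint's defaults of image_size=224 and patch_size=16: ViT-MAE's decoder
# predicts one flattened RGB patch per input patch.
num_patches = (224 // 16) ** 2   # 196 patches per image
pixels_per_patch = 16 * 16 * 3   # 768 predicted pixel values per patch
assert (1, num_patches, pixels_per_patch) == (1, 196, 768)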
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_swiftformer": [
"SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SwiftFormerConfig",
"SwiftFormerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
"SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwiftFormerForImageClassification",
"SwiftFormerModel",
"SwiftFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
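# Note (illustrative, not part of the file above): with _LazyModule installed in
# sys.modules, importing the package is cheap; the heavy torch-backed symbols are
# only resolved on first attribute access. For example:
#
#   from transformers.models.swiftformer import SwiftFormerConfig  # config only
#   from transformers.models.swiftformer import SwiftFormerModel   # triggers the torch import path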
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_upernet""": ["""UperNetConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_upernet"] = [
"""UperNetForSemanticSegmentation""",
"""UperNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """Text decoder that maps a (CLIP-style) prefix embedding plus token embeddings through a GPT-2 LM head."""
    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]
    @register_to_config
    def __init__(self, prefix_length: int, prefix_inner_dim: int, prefix_hidden_dim: Optional[int] = None, vocab_size: int = 50257, n_positions: int = 1024, n_embd: int = 768, n_layer: int = 12, n_head: int = 12, n_inner: Optional[int] = None, activation_function: str = "gelu_new", resid_pdrop: float = 0.1, embd_pdrop: float = 0.1, attn_pdrop: float = 0.1, layer_norm_epsilon: float = 1e-5, initializer_range: float = 0.02, scale_attn_weights: bool = True, use_cache: bool = True, scale_attn_by_inverse_layer_idx: bool = False, reorder_and_upcast_attn: bool = False):
        super().__init__()
        self.prefix_length = prefix_length
        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal.")
        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim
        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )
        gpt_config = GPT2Config(vocab_size=vocab_size, n_positions=n_positions, n_embd=n_embd, n_layer=n_layer, n_head=n_head, n_inner=n_inner, activation_function=activation_function, resid_pdrop=resid_pdrop, embd_pdrop=embd_pdrop, attn_pdrop=attn_pdrop, layer_norm_epsilon=layer_norm_epsilon, initializer_range=initializer_range, scale_attn_weights=scale_attn_weights, use_cache=use_cache, scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx, reorder_and_upcast_attn=reorder_and_upcast_attn)
        self.transformer = GPT2LMHeadModel(gpt_config)
    def forward(self, input_ids: torch.Tensor, prefix_embeds: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)
        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out
    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)
    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id)
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths
    @torch.no_grad()
    def generate_beam(self, input_embeds=None, device=None, input_ids=None, beam_size: int = 5, entry_length: int = 67, temperature: float = 1.0, eos_token_id: Optional[int] = None):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)
        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)
        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()
            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]
            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break
        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
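# Minimal usage sketch for the decoder above (added for illustration; the class
# name follows the rename to UniDiffuserTextDecoder, and all dimensions here are
# deliberately tiny, not a released configuration).
if __name__ == "__main__":
    decoder = UniDiffuserTextDecoder(
        prefix_length=4, prefix_inner_dim=32, prefix_hidden_dim=16,
        vocab_size=100, n_positions=64, n_embd=32, n_layer=2, n_head=2,
    )
    input_ids = torch.randint(0, 100, (2, 8))  # two sequences of 8 tokens
    prefix = torch.randn(2, 4, 32)             # CLIP-style prefix embeddings
    out, hidden = decoder(input_ids, prefix)   # prefix_hidden_dim is set -> (out, hidden)
    print(out.logits.shape)                    # torch.Size([2, 12, 100]): 4 prefix + 8 text positions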
"""simple docstring"""
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """Interpolate and evaluate a polynomial at x0 using Neville's method."""
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]
    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
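    # Usage sketch (illustrative; mirrors the routine's original doctest):
    # interpolate the line y = x + 5 through five samples and evaluate at x0 = 5.
    print(neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)[0])  # 10.0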
"""simple docstring"""
from math import isqrt
def is_prime(number: int) -> bool:
    """Trial division up to the integer square root."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Count the primes below max_prime among differences of consecutive cubes."""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
if __name__ == "__main__":
print(f'{solution() = }')
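    # Sanity check (illustrative): the candidates tested above are exactly the
    # differences of consecutive cubes, (n + 1)**3 - n**3 = 3*n*n + 3*n + 1,
    # i.e. 7, 19, 37, 61, ... for n = 1, 2, 3, ...
    for n in range(1, 5):
        assert (n + 1) ** 3 - n ** 3 == 3 * n * n + 3 * n + 1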
"""simple docstring"""
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class AccelerateLauncherTester(unittest.TestCase):
    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"])
    base_cmd = ["accelerate", "launch"]
    config_folder = Path.home() / ".cache/huggingface/accelerate"
    config_file = "default_config.yaml"
    config_path = config_folder / config_file
    changed_path = config_folder / "_default_config.yaml"
    test_config_path = Path("tests/test_configs")
    @classmethod
    def setUpClass(cls):
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def tearDownClass(cls):
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)

    def test_accelerate_launch(self):
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())

    def test_config_compatibility(self):
        for config in sorted(self.test_config_path.glob("**/*.yaml")):
            with self.subTest(config_file=config):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(config), self.test_file_path], env=os.environ.copy())

    def test_accelerate_test(self):
        execute_subprocess_async(["accelerate", "test"], env=os.environ.copy())
class TpuConfigTester(unittest.TestCase):
    tpu_name = "test-tpu"
    tpu_zone = "us-central1-a"
    command = "ls"
    cmd = ["accelerate", "tpu-config"]
    base_output = "cd /usr/share"
    command_file = "tests/test_samples/test_command_file.sh"
    gcloud = "Running gcloud compute tpus tpu-vm ssh"
    def test_base(self):
        output = run_command(
            self.cmd
            + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all", output)

    def test_base_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command",
                self.command,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all", output)

    def test_with_config_file(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"], return_stdout=True)
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output)

    def test_with_config_file_and_command(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all", output)

    def test_with_config_file_and_multiple_command(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--command",
                self.command,
                "--command",
                'echo "Hello World"',
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all',
            output)

    def test_with_config_file_and_command_file(self):
        output = run_command(
            self.cmd
            + ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output)

    def test_with_config_file_and_command_file_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command_file",
                self.command_file,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output)

    def test_accelerate_install(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all',
            output)

    def test_accelerate_install_version(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--install_accelerate",
                "--accelerate_version",
                "12.0.0",
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all',
            output)
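# Illustrative sketch (not part of the test suite above): with --debug, the same
# helper can be used interactively to preview the gcloud command that
# `accelerate tpu-config` would run, without executing anything on a TPU.
if __name__ == "__main__":
    preview = run_command(
        ["accelerate", "tpu-config", "--config_file", "tests/test_configs/latest.yaml", "--debug"],
        return_stdout=True,
    )
    print(preview)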
from ..utils import DummyObject, requires_backends
class lowercase__( metaclass=snake_case__ ):
'''simple docstring'''
snake_case__ = ['''torch''']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> int:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> str:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
class lowercase__( metaclass=snake_case__ ):
'''simple docstring'''
snake_case__ = ['''torch''']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Any:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
class lowercase__( metaclass=snake_case__ ):
'''simple docstring'''
snake_case__ = ['''torch''']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Any:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> int:
"""simple docstring"""
requires_backends(cls , ["torch"])
class lowercase__( metaclass=snake_case__ ):
'''simple docstring'''
snake_case__ = ['''torch''']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Any:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
class lowercase__( metaclass=snake_case__ ):
'''simple docstring'''
snake_case__ = ['''torch''']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Any:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["torch"])
class lowercase__( metaclass=snake_case__ ):
'''simple docstring'''
snake_case__ = ['''torch''']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Dict:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
class lowercase__( metaclass=snake_case__ ):
'''simple docstring'''
snake_case__ = ['''torch''']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Any:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Dict:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> str:
"""simple docstring"""
requires_backends(cls , ["torch"])
class lowercase__( metaclass=snake_case__ ):
'''simple docstring'''
snake_case__ = ['''torch''']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Dict:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["torch"])
class lowercase__( metaclass=snake_case__ ):
'''simple docstring'''
snake_case__ = ['''torch''']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> str:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["torch"])
class lowercase__( metaclass=snake_case__ ):
'''simple docstring'''
snake_case__ = ['''torch''']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Any:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Any:
"""simple docstring"""
requires_backends(cls , ["torch"])
class lowercase__( metaclass=snake_case__ ):
'''simple docstring'''
snake_case__ = ['''torch''']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> List[Any]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Dict:
"""simple docstring"""
requires_backends(cls , ["torch"])
def _lowerCamelCase ( *A_ : Tuple , **A_ : str ) -> str:
'''simple docstring'''
requires_backends(A_ , ["torch"] )
def _lowerCamelCase ( *A_ : Any , **A_ : Optional[int] ) -> Tuple:
'''simple docstring'''
requires_backends(A_ , ["torch"] )
def _lowerCamelCase ( *A_ : Dict , **A_ : Dict ) -> int:
'''simple docstring'''
requires_backends(A_ , ["torch"] )
def _lowerCamelCase ( *A_ : Dict , **A_ : Optional[int] ) -> Dict:
'''simple docstring'''
requires_backends(A_ , ["torch"] )
def _lowerCamelCase ( *A_ : Dict , **A_ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
requires_backends(A_ , ["torch"] )
def _lowerCamelCase ( *A_ : Any , **A_ : Any ) -> Tuple:
'''simple docstring'''
requires_backends(A_ , ["torch"] )
def _lowerCamelCase ( *A_ : int , **A_ : Union[str, Any] ) -> int:
'''simple docstring'''
requires_backends(A_ , ["torch"] )
class lowercase__( metaclass=snake_case__ ):
'''simple docstring'''
snake_case__ = ['''torch''']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> str:
"""simple docstring"""
requires_backends(cls , ["torch"])
class lowercase__( metaclass=snake_case__ ):
'''simple docstring'''
snake_case__ = ['''torch''']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> List[Any]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["torch"])
class lowercase__( metaclass=snake_case__ ):
'''simple docstring'''
snake_case__ = ['''torch''']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> List[Any]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["torch"])
class lowercase__( metaclass=snake_case__ ):
'''simple docstring'''
snake_case__ = ['''torch''']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Dict:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
class lowercase__( metaclass=snake_case__ ):
'''simple docstring'''
snake_case__ = ['''torch''']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Any:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Any:
"""simple docstring"""
requires_backends(cls , ["torch"])
class lowercase__( metaclass=snake_case__ ):
'''simple docstring'''
snake_case__ = ['''torch''']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> List[str]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> int:
"""simple docstring"""
requires_backends(cls , ["torch"])
class lowercase__( metaclass=snake_case__ ):
'''simple docstring'''
snake_case__ = ['''torch''']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Dict:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["torch"])
class lowercase__( metaclass=snake_case__ ):
'''simple docstring'''
snake_case__ = ['''torch''']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> List[str]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> str:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> str:
"""simple docstring"""
requires_backends(cls , ["torch"])
class lowercase__( metaclass=snake_case__ ):
'''simple docstring'''
snake_case__ = ['''torch''']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> str:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["torch"])
class lowercase__( metaclass=snake_case__ ):
'''simple docstring'''
snake_case__ = ['''torch''']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
class lowercase__( metaclass=snake_case__ ):
'''simple docstring'''
snake_case__ = ['''torch''']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> int:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> int:
"""simple docstring"""
requires_backends(cls , ["torch"])
class lowercase__( metaclass=snake_case__ ):
'''simple docstring'''
snake_case__ = ['''torch''']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> List[str]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Dict:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> str:
"""simple docstring"""
requires_backends(cls , ["torch"])
class lowercase__( metaclass=snake_case__ ):
'''simple docstring'''
snake_case__ = ['''torch''']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> List[str]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["torch"])
class lowercase__( metaclass=snake_case__ ):
'''simple docstring'''
snake_case__ = ['''torch''']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> List[Any]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
class lowercase__( metaclass=snake_case__ ):
'''simple docstring'''
snake_case__ = ['''torch''']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
class lowercase__( metaclass=snake_case__ ):
'''simple docstring'''
snake_case__ = ['''torch''']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> List[str]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Any:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Any:
"""simple docstring"""
requires_backends(cls , ["torch"])
class lowercase__( metaclass=snake_case__ ):
'''simple docstring'''
snake_case__ = ['''torch''']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Dict:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> str:
"""simple docstring"""
requires_backends(cls , ["torch"])
class lowercase__( metaclass=snake_case__ ):
'''simple docstring'''
snake_case__ = ['''torch''']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> str:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Any:
"""simple docstring"""
requires_backends(cls , ["torch"])
class lowercase__( metaclass=snake_case__ ):
'''simple docstring'''
snake_case__ = ['''torch''']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> int:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> str:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Dict:
"""simple docstring"""
requires_backends(cls , ["torch"])
class lowercase__( metaclass=snake_case__ ):
'''simple docstring'''
snake_case__ = ['''torch''']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> int:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Any:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Any:
"""simple docstring"""
requires_backends(cls , ["torch"])
class lowercase__( metaclass=snake_case__ ):
'''simple docstring'''
snake_case__ = ['''torch''']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> int:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
class lowercase__( metaclass=snake_case__ ):
'''simple docstring'''
snake_case__ = ['''torch''']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> str:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Any:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["torch"])
class lowercase__( metaclass=snake_case__ ):
'''simple docstring'''
snake_case__ = ['''torch''']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> List[Any]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["torch"])
class lowercase__( metaclass=snake_case__ ):
'''simple docstring'''
snake_case__ = ['''torch''']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> List[str]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Dict:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Dict:
"""simple docstring"""
requires_backends(cls , ["torch"])
class lowercase__( metaclass=snake_case__ ):
'''simple docstring'''
snake_case__ = ['''torch''']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Dict:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
class lowercase__( metaclass=snake_case__ ):
'''simple docstring'''
snake_case__ = ['''torch''']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> int:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["torch"])
class lowercase__( metaclass=snake_case__ ):
'''simple docstring'''
snake_case__ = ['''torch''']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> str:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Dict:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
class lowercase__( metaclass=snake_case__ ):
'''simple docstring'''
snake_case__ = ['''torch''']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Dict:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> str:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Dict:
"""simple docstring"""
requires_backends(cls , ["torch"])
class lowercase__( metaclass=snake_case__ ):
'''simple docstring'''
snake_case__ = ['''torch''']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Any:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
class lowercase__( metaclass=snake_case__ ):
'''simple docstring'''
snake_case__ = ['''torch''']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> List[str]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
class lowercase__( metaclass=snake_case__ ):
'''simple docstring'''
snake_case__ = ['''torch''']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Any:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> str:
"""simple docstring"""
requires_backends(cls , ["torch"])
class lowercase__( metaclass=snake_case__ ):
'''simple docstring'''
snake_case__ = ['''torch''']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["torch"])
class lowercase__( metaclass=snake_case__ ):
'''simple docstring'''
snake_case__ = ['''torch''']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Tuple:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
class lowercase__( metaclass=snake_case__ ):
'''simple docstring'''
snake_case__ = ['''torch''']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Dict:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
class lowercase__( metaclass=snake_case__ ):
'''simple docstring'''
snake_case__ = ['''torch''']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> str:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
class lowercase__( metaclass=snake_case__ ):
'''simple docstring'''
snake_case__ = ['''torch''']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> str:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> str:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> str:
"""simple docstring"""
requires_backends(cls , ["torch"])
class lowercase__( metaclass=snake_case__ ):
'''simple docstring'''
snake_case__ = ['''torch''']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> int:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> str:
"""simple docstring"""
requires_backends(cls , ["torch"])
class lowercase__( metaclass=snake_case__ ):
'''simple docstring'''
snake_case__ = ['''torch''']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Any:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Dict:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["torch"])
class lowercase__( metaclass=snake_case__ ):
'''simple docstring'''
snake_case__ = ['''torch''']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Any:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> str:
"""simple docstring"""
requires_backends(cls , ["torch"])
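# Note (illustrative, not part of the file above): the classes above follow the
# "dummy object" pattern used by Hugging Face libraries. A placeholder class is
# generated for every torch-backed symbol so that importing it without torch
# installed only fails, with a helpful message, when it is actually used.
# A minimal sketch of the pattern (the class name here is made up):
#
#   from transformers.utils import DummyObject, requires_backends
#
#   class SomeTorchOnlyModel(metaclass=DummyObject):
#       _backends = ["torch"]
#
#       def __init__(self, *args, **kwargs):
#           requires_backends(self, ["torch"])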
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Applies a warmup schedule on top of a given learning rate decay schedule."""

    def __init__(self, initial_learning_rate: float, decay_schedule_fn: Callable, warmup_steps: int, power: float = 1.0, name: str = None):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def create_optimizer(init_lr: float, num_train_steps: int, num_warmup_steps: int, min_lr_ratio: float = 0.0, adam_beta1: float = 0.9, adam_beta2: float = 0.999, adam_epsilon: float = 1e-8, adam_clipnorm: Optional[float] = None, adam_global_clipnorm: Optional[float] = None, weight_decay_rate: float = 0.0, power: float = 1.0, include_in_weight_decay: Optional[List[str]] = None):
    """Creates an optimizer with a warmup phase followed by a polynomial decay schedule."""
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr, decay_steps=num_train_steps - num_warmup_steps, end_learning_rate=init_lr * min_lr_ratio, power=power)
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr, decay_schedule_fn=lr_schedule, warmup_steps=num_warmup_steps)
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule, weight_decay_rate=weight_decay_rate, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm, exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"], include_in_weight_decay=include_in_weight_decay)
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm)
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
class AdamWeightDecay(Adam):
    """Adam with decoupled weight decay applied to selected parameters."""

    def __init__(self, learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-7, amsgrad=False, weight_decay_rate=0.0, include_in_weight_decay=None, exclude_from_weight_decay=None, name="AdamWeightDecay", **kwargs):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        """Creates an optimizer from its config with WarmUp as a custom object."""
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate")

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieves the learning rate with the given state."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        """Whether to apply weight decay to `param_name`."""
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator:
    """Accumulates gradients over several steps so they can be applied at once."""

    def __init__(self):
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        """Number of accumulated steps."""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        """The accumulated gradients on the current replica."""
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        """Accumulates `gradients` on the current replica."""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")
        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)
        self._accum_steps.assign_add(1)

    def reset(self):
        """Resets the accumulated gradients on the current replica."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
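

# --- Added example (not in the original file): a minimal sketch of how the
# accumulator above is typically driven. `compute_loss` is supplied by the
# caller; nothing here is specific to a particular model.
def _gradient_accumulation_demo(model, optimizer, batches, compute_loss, accumulation_steps=4):
    accumulator = GradientAccumulator()
    for step, batch in enumerate(batches):
        with tf.GradientTape() as tape:
            loss = compute_loss(model, batch)
        # Accumulate this step's gradients instead of applying them immediately.
        accumulator(tape.gradient(loss, model.trainable_variables))
        if (step + 1) % accumulation_steps == 0:
            # Apply the summed gradients once per `accumulation_steps` batches.
            optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
            accumulator.reset()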
'''simple docstring'''
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaVaModelTester:
def __init__( self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=7 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=99 , UpperCAmelCase=32 , UpperCAmelCase=5 , UpperCAmelCase=4 , UpperCAmelCase=37 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=5_12 , UpperCAmelCase=16 , UpperCAmelCase=2 , UpperCAmelCase=0.02 , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase="None" , UpperCAmelCase=3 , UpperCAmelCase=4 , UpperCAmelCase=None , ):
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_input_mask
a_ = use_token_type_ids
a_ = use_labels
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = type_sequence_label_size
a_ = initializer_range
a_ = num_labels
a_ = num_choices
a_ = relative_attention
a_ = position_biased_input
a_ = pos_att_type
a_ = scope
def lowerCAmelCase__ ( self ):
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a_ = None
if self.use_input_mask:
a_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
a_ = None
if self.use_token_type_ids:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a_ = None
a_ = None
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a_ = ids_tensor([self.batch_size] , self.num_choices )
a_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase__ ( self ):
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def lowerCAmelCase__ ( self , UpperCAmelCase ):
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def lowerCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
a_ = DebertaVaModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
a_ = model(UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase )[0]
a_ = model(UpperCAmelCase , token_type_ids=UpperCAmelCase )[0]
a_ = model(UpperCAmelCase )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def lowerCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
a_ = DebertaVaForMaskedLM(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
a_ = model(UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
a_ = self.num_labels
a_ = DebertaVaForSequenceClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
a_ = model(UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(UpperCAmelCase )
def lowerCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
a_ = self.num_labels
a_ = DebertaVaForTokenClassification(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
a_ = model(UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
a_ = DebertaVaForQuestionAnswering(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
a_ = model(
UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
a_ = DebertaVaForMultipleChoice(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
a_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a_ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a_ = model(
UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase__ ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class DebertaVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': DebertaVaModel,
'fill-mask': DebertaVaForMaskedLM,
'question-answering': DebertaVaForQuestionAnswering,
'text-classification': DebertaVaForSequenceClassification,
'token-classification': DebertaVaForTokenClassification,
'zero-shot': DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
def lowerCAmelCase__ ( self ):
a_ = DebertaVaModelTester(self )
a_ = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
def lowerCAmelCase__ ( self ):
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self ):
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*UpperCAmelCase )
def lowerCAmelCase__ ( self ):
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*UpperCAmelCase )
def lowerCAmelCase__ ( self ):
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*UpperCAmelCase )
def lowerCAmelCase__ ( self ):
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*UpperCAmelCase )
def lowerCAmelCase__ ( self ):
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*UpperCAmelCase )
def lowerCAmelCase__ ( self ):
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*UpperCAmelCase )
@slow
def lowerCAmelCase__ ( self ):
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = DebertaVaModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaVaModelIntegrationTest(unittest.TestCase):
@unittest.skip(reason="""Model not available yet""" )
def lowerCAmelCase__ ( self ):
pass
@slow
def lowerCAmelCase__ ( self ):
a_ = DebertaVaModel.from_pretrained("""microsoft/deberta-v2-xlarge""" )
a_ = torch.tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
a_ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
a_ = model(UpperCAmelCase , attention_mask=UpperCAmelCase )[0]
# compare the actual values for a slice.
a_ = torch.tensor(
[[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCAmelCase , atol=1e-4 ) , f'''{output[:, 1:4, 1:4]}''' )
'''simple docstring'''
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    parser.add_argument(
        '--txt2img_unclip',
        default='kakaobrain/karlo-v1-alpha',
        type=str,
        required=False,
        help='The pretrained txt2img unclip.',
    )
    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14')

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
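# --- Added usage note (not in the original file): a typical invocation, assuming
# the default karlo checkpoint; the script filename is hypothetical.
#     python convert_unclip_to_image_variation.py --dump_path ./unclip-image-variation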
"""simple docstring"""
from heapq import heappop, heappush
import numpy as np
def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
) -> tuple[float | int, list[tuple[int, int]]]:
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
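

# --- Added example (not in the original file): a quick sanity check on a 3x3
# grid where 1 marks a walkable cell; the only route skirts the blocked middle row.
def _dijkstra_demo() -> None:
    grid = np.array([[1, 1, 1], [0, 1, 0], [1, 1, 1]])
    dist, path = dijkstra(grid, (0, 0), (2, 2), allow_diagonal=False)
    assert dist == 4.0
    assert path == [(0, 0), (0, 1), (1, 1), (2, 1), (2, 2)]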
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
# Load configuration defined in the metadata file
with open(snake_case ) as metadata_file:
__UpperCamelCase : Optional[int] = json.load(snake_case )
__UpperCamelCase : List[str] = LukeConfig(use_entity_aware_attention=snake_case , **metadata["""model_config"""] )
# Load in the weights from the checkpoint_path
__UpperCamelCase : Union[str, Any] = torch.load(snake_case , map_location="""cpu""" )["""module"""]
# Load the entity vocab file
__UpperCamelCase : Union[str, Any] = load_original_entity_vocab(snake_case )
# add an entry for [MASK2]
__UpperCamelCase : Any = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
__UpperCamelCase : Any = XLMRobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )
# Add special tokens to the token vocabulary for downstream tasks
__UpperCamelCase : Optional[int] = AddedToken("""<ent>""" , lstrip=snake_case , rstrip=snake_case )
__UpperCamelCase : List[Any] = AddedToken("""<ent2>""" , lstrip=snake_case , rstrip=snake_case )
tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(snake_case )
with open(os.path.join(snake_case , """tokenizer_config.json""" ) , """r""" ) as f:
__UpperCamelCase : int = json.load(snake_case )
__UpperCamelCase : Optional[Any] = """MLukeTokenizer"""
with open(os.path.join(snake_case , """tokenizer_config.json""" ) , """w""" ) as f:
json.dump(snake_case , snake_case )
with open(os.path.join(snake_case , MLukeTokenizer.vocab_files_names["""entity_vocab_file"""] ) , """w""" ) as f:
json.dump(snake_case , snake_case )
__UpperCamelCase : List[str] = MLukeTokenizer.from_pretrained(snake_case )
# Initialize the embeddings of the special tokens
__UpperCamelCase : Tuple = tokenizer.convert_tokens_to_ids(["""@"""] )[0]
__UpperCamelCase : Any = tokenizer.convert_tokens_to_ids(["""#"""] )[0]
__UpperCamelCase : str = state_dict["""embeddings.word_embeddings.weight"""]
__UpperCamelCase : List[str] = word_emb[ent_init_index].unsqueeze(0 )
__UpperCamelCase : List[Any] = word_emb[enta_init_index].unsqueeze(0 )
__UpperCamelCase : int = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
__UpperCamelCase : List[Any] = state_dict[bias_name]
__UpperCamelCase : Optional[int] = decoder_bias[ent_init_index].unsqueeze(0 )
__UpperCamelCase : Any = decoder_bias[enta_init_index].unsqueeze(0 )
__UpperCamelCase : Dict = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
__UpperCamelCase : Any = F'''encoder.layer.{layer_index}.attention.self.'''
__UpperCamelCase : Union[str, Any] = state_dict[prefix + matrix_name]
__UpperCamelCase : Any = state_dict[prefix + matrix_name]
__UpperCamelCase : Optional[int] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
__UpperCamelCase : List[str] = state_dict["""entity_embeddings.entity_embeddings.weight"""]
__UpperCamelCase : Union[str, Any] = entity_emb[entity_vocab["""[MASK]"""]].unsqueeze(0 )
__UpperCamelCase : str = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
__UpperCamelCase : Optional[int] = state_dict["""entity_predictions.bias"""]
__UpperCamelCase : Optional[Any] = entity_prediction_bias[entity_vocab["""[MASK]"""]].unsqueeze(0 )
__UpperCamelCase : Any = torch.cat([entity_prediction_bias, entity_mask_bias] )
__UpperCamelCase : Any = LukeForMaskedLM(config=snake_case ).eval()
state_dict.pop("""entity_predictions.decoder.weight""" )
state_dict.pop("""lm_head.decoder.weight""" )
state_dict.pop("""lm_head.decoder.bias""" )
__UpperCamelCase : str = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("""lm_head""" ) or key.startswith("""entity_predictions""" )):
__UpperCamelCase : Union[str, Any] = state_dict[key]
else:
__UpperCamelCase : Union[str, Any] = state_dict[key]
__UpperCamelCase , __UpperCamelCase : Optional[Any] = model.load_state_dict(snake_case , strict=snake_case )
if set(snake_case ) != {"luke.embeddings.position_ids"}:
raise ValueError(F'''Unexpected unexpected_keys: {unexpected_keys}''' )
if set(snake_case ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F'''Unexpected missing_keys: {missing_keys}''' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
__UpperCamelCase : Tuple = MLukeTokenizer.from_pretrained(snake_case , task="""entity_classification""" )
__UpperCamelCase : Union[str, Any] = """ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."""
__UpperCamelCase : Any = (0, 9)
__UpperCamelCase : List[Any] = tokenizer(snake_case , entity_spans=[span] , return_tensors="""pt""" )
__UpperCamelCase : Tuple = model(**snake_case )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
__UpperCamelCase : Union[str, Any] = torch.Size((1, 33, 7_68) )
__UpperCamelCase : List[str] = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , snake_case , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
__UpperCamelCase : int = torch.Size((1, 1, 7_68) )
__UpperCamelCase : List[Any] = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
F''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , snake_case , atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
__UpperCamelCase : Any = MLukeTokenizer.from_pretrained(snake_case )
__UpperCamelCase : Tuple = """Tokyo is the capital of <mask>."""
__UpperCamelCase : str = (24, 30)
__UpperCamelCase : int = tokenizer(snake_case , entity_spans=[span] , return_tensors="""pt""" )
__UpperCamelCase : int = model(**snake_case )
__UpperCamelCase : str = encoding["""input_ids"""][0].tolist()
__UpperCamelCase : Any = input_ids.index(tokenizer.convert_tokens_to_ids("""<mask>""" ) )
__UpperCamelCase : List[str] = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(snake_case )
__UpperCamelCase : Dict = outputs.entity_logits[0][0].argmax().item()
__UpperCamelCase : int = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("""en:""" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("""Saving PyTorch model to {}""".format(snake_case ) )
model.save_pretrained(snake_case )
def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]
    data = [json.loads(line) for line in open(entity_vocab_path)]
    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_entity_name = f"{language}:{entity_name}"
            new_mapping[new_entity_name] = entity_id
    return new_mapping
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
a__ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
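# --- Added usage note (not in the original file): a typical invocation with
# hypothetical local paths:
#     python convert_mluke_checkpoint.py \
#         --checkpoint_path mluke/pytorch_model.bin \
#         --metadata_path mluke/metadata.json \
#         --entity_vocab_path mluke/entity_vocab.tsv \
#         --pytorch_dump_folder_path ./mluke-base \
#         --model_size base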
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, input_modal):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(input_modal))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(l) for l in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length
        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs
def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
    return transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.46777044, 0.44531429, 0.40661017],
                std=[0.12221994, 0.12145835, 0.14380469],
            ),
        ]
    )
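

# --- Added usage sketch (not in the original file): wiring the pieces above into
# a DataLoader. The jsonl path is a placeholder and `tokenizer` can be anything
# exposing an `encode` method.
def _build_mmimdb_loader(tokenizer, data_path="data/train.jsonl", batch_size=8):
    dataset = JsonlDataset(
        data_path, tokenizer, get_image_transforms(), get_mmimdb_labels(), max_seq_length=512
    )
    return torch.utils.data.DataLoader(dataset, batch_size=batch_size, collate_fn=collate_fn)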
"""simple docstring"""
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Sieve of Eratosthenes: all primes strictly below `max_number`."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800800, degree: int = 800800) -> int:
    """Counts hybrid integers p**q * q**p (p, q distinct primes) not exceeding base**degree."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count
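
# --- Added note (not in the original file): the inner-loop comparison is the
# condition p**q * q**p <= base**degree taken in log2 form, i.e.
# q*log2(p) + p*log2(q) <= degree*log2(base) == upper_bound, which avoids
# computing astronomically large integers.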
if __name__ == "__main__":
print(f"""{solution() = }""")
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_resnet"] = [
"RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"ResNetForImageClassification",
"ResNetModel",
"ResNetPreTrainedModel",
"ResNetBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_resnet"] = [
"TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFResNetForImageClassification",
"TFResNetModel",
"TFResNetPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_resnet"] = [
"FlaxResNetForImageClassification",
"FlaxResNetModel",
"FlaxResNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
"""simple docstring"""
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        # Leave some of the GradScaler kwargs at their defaults to check they are still set.
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler
        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)
        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
'''simple docstring'''
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple:
'''simple docstring'''
if inductance <= 0:
raise ValueError('''Inductance cannot be 0 or negative''' )
elif capacitance <= 0:
raise ValueError('''Capacitance cannot be 0 or negative''' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
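
# --- Added example (not in the original file): a 10 mH inductor with a 100 nF
# capacitor resonates near 5.03 kHz, since f = 1 / (2*pi*sqrt(L*C)):
#     resonant_frequency(inductance=10e-3, capacitance=100e-9)
#     -> ('Resonant frequency', 5032.9212...)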
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name: str, num_meta4D_last_stage: int) -> str:
_snake_case = old_name
if "patch_embed" in old_name:
_snake_case , _snake_case , _snake_case = old_name.split('''.''' )
if layer == "0":
_snake_case = old_name.replace('''0''' , '''convolution1''' )
elif layer == "1":
_snake_case = old_name.replace('''1''' , '''batchnorm_before''' )
elif layer == "3":
_snake_case = old_name.replace('''3''' , '''convolution2''' )
else:
_snake_case = old_name.replace('''4''' , '''batchnorm_after''' )
if "network" in old_name and re.search(R'''\d\.\d''' , UpperCAmelCase__ ):
_snake_case = R'''\b\d{2}\b'''
if bool(re.search(UpperCAmelCase__ , UpperCAmelCase__ ) ):
_snake_case = re.search(R'''\d\.\d\d.''' , UpperCAmelCase__ ).group()
else:
_snake_case = re.search(R'''\d\.\d.''' , UpperCAmelCase__ ).group()
if int(match[0] ) < 6:
_snake_case = old_name.replace(UpperCAmelCase__ , '''''' )
_snake_case = trimmed_name.replace('''network''' , match[0] + '''.meta4D_layers.blocks.''' + match[2:-1] )
_snake_case = '''intermediate_stages.''' + trimmed_name
else:
_snake_case = old_name.replace(UpperCAmelCase__ , '''''' )
if int(match[2] ) < num_meta4D_last_stage:
_snake_case = trimmed_name.replace('''network''' , '''meta4D_layers.blocks.''' + match[2] )
else:
_snake_case = str(int(match[2] ) - num_meta4D_last_stage )
_snake_case = trimmed_name.replace('''network''' , '''meta3D_layers.blocks.''' + layer_index )
if "norm1" in old_name:
_snake_case = trimmed_name.replace('''norm1''' , '''layernorm1''' )
elif "norm2" in old_name:
_snake_case = trimmed_name.replace('''norm2''' , '''layernorm2''' )
elif "fc1" in old_name:
_snake_case = trimmed_name.replace('''fc1''' , '''linear_in''' )
elif "fc2" in old_name:
_snake_case = trimmed_name.replace('''fc2''' , '''linear_out''' )
_snake_case = '''last_stage.''' + trimmed_name
elif "network" in old_name and re.search(R'''.\d.''' , UpperCAmelCase__ ):
_snake_case = old_name.replace('''network''' , '''intermediate_stages''' )
if "fc" in new_name:
_snake_case = new_name.replace('''fc''' , '''convolution''' )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
_snake_case = new_name.replace('''norm1''' , '''batchnorm_before''' )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
_snake_case = new_name.replace('''norm2''' , '''batchnorm_after''' )
if "proj" in new_name:
_snake_case = new_name.replace('''proj''' , '''projection''' )
if "dist_head" in new_name:
_snake_case = new_name.replace('''dist_head''' , '''distillation_classifier''' )
elif "head" in new_name:
_snake_case = new_name.replace('''head''' , '''classifier''' )
elif "patch_embed" in new_name:
_snake_case = '''efficientformer.''' + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
_snake_case = new_name.replace('''norm''' , '''layernorm''' )
_snake_case = '''efficientformer.''' + new_name
else:
_snake_case = '''efficientformer.encoder.''' + new_name
return new_name
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
for key in checkpoint.copy().keys():
_snake_case = checkpoint.pop(UpperCAmelCase__ )
_snake_case = val
return checkpoint
def prepare_img():
_snake_case = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_snake_case = Image.open(requests.get(UpperCAmelCase__ , stream=UpperCAmelCase__ ).raw )
return image
def convert_efficientformer_checkpoint(checkpoint_path, efficientformer_config_file, pytorch_dump_path, push_to_hub):
_snake_case = torch.load(UpperCAmelCase__ , map_location='''cpu''' )['''model''']
_snake_case = EfficientFormerConfig.from_json_file(UpperCAmelCase__ )
_snake_case = EfficientFormerForImageClassificationWithTeacher(UpperCAmelCase__ )
_snake_case = '''_'''.join(checkpoint_path.split('''/''' )[-1].split('''.''' )[0].split('''_''' )[:-1] )
_snake_case = config.depths[-1] - config.num_metaad_blocks + 1
_snake_case = convert_torch_checkpoint(UpperCAmelCase__ , UpperCAmelCase__ )
model.load_state_dict(UpperCAmelCase__ )
model.eval()
_snake_case = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
# prepare image
_snake_case = prepare_img()
_snake_case = 256
_snake_case = 224
_snake_case = EfficientFormerImageProcessor(
size={'''shortest_edge''': image_size} , crop_size={'''height''': crop_size, '''width''': crop_size} , resample=pillow_resamplings['''bicubic'''] , )
_snake_case = processor(images=UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values
# original processing pipeline
_snake_case = Compose(
[
Resize(UpperCAmelCase__ , interpolation=pillow_resamplings['''bicubic'''] ),
CenterCrop(UpperCAmelCase__ ),
ToTensor(),
Normalize(UpperCAmelCase__ , UpperCAmelCase__ ),
] )
_snake_case = image_transforms(UpperCAmelCase__ ).unsqueeze(0 )
assert torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ )
_snake_case = model(UpperCAmelCase__ )
_snake_case = outputs.logits
_snake_case = (1, 1000)
if "l1" in model_name:
_snake_case = torch.Tensor(
[-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
assert torch.allclose(logits[0, :10] , UpperCAmelCase__ , atol=1E-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
_snake_case = torch.Tensor(
[-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
assert torch.allclose(logits[0, :10] , UpperCAmelCase__ , atol=1E-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
_snake_case = torch.Tensor(
[-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
assert logits.shape == expected_shape
else:
raise ValueError(
f"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""" )
# Save Checkpoints
Path(UpperCAmelCase__ ).mkdir(exist_ok=UpperCAmelCase__ )
model.save_pretrained(UpperCAmelCase__ )
print(f"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" )
processor.save_pretrained(UpperCAmelCase__ )
print(f"""Processor successfuly saved at {pytorch_dump_path}""" )
if push_to_hub:
print('''Pushing model to the hub...''' )
model.push_to_hub(
repo_id=f"""Bearnardd/{pytorch_dump_path}""" , commit_message='''Add model''' , use_temp_dir=UpperCAmelCase__ , )
processor.push_to_hub(
repo_id=f"""Bearnardd/{pytorch_dump_path}""" , commit_message='''Add image processor''' , use_temp_dir=UpperCAmelCase__ , )
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''',
default=None,
type=str,
required=True,
help='''Path to EfficientFormer pytorch checkpoint.''',
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for EfficientFormer model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
parser.set_defaults(push_to_hub=True)
snake_case = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
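# --- Added usage note (not in the original file): a typical invocation with
# hypothetical local paths:
#     python convert_efficientformer_checkpoint.py \
#         --pytorch_model_path efficientformer_l1.pth \
#         --config_file efficientformer_l1.json \
#         --pytorch_dump_path efficientformer-l1 \
#         --no-push_to_hub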
"""simple docstring"""
import requests
_NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="


def fetch_bbc_news(bbc_news_api_key: str) -> None:
    # fetching a list of articles in json format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["articles"], 1):
        print(f"{i}.) {article['title']}")
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key='''<Your BBC News API key goes here>''')
def z_function(input_str: str) -> list[int]:
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if the value is at least the length of the pattern string,
        # this index is the starting position of a substring
        # equal to the pattern string
        if val >= len(pattern):
            answer += 1

    return answer
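

# --- Added example (not in the original file): counting pattern occurrences with
# the Z-function; "aba" occurs in "abacaba" at indices 0 and 4.
def _z_function_demo() -> None:
    assert find_pattern("aba", "abacaba") == 2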
if __name__ == "__main__":
import doctest
doctest.testmod()
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
snake_case__ : Optional[Any] = logging.get_logger(__name__)
class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_yolos"] = [
'''YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''YolosForObjectDetection''',
'''YolosModel''',
'''YolosPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from collections import Counter
from timeit import timeit
def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict[str, int] = {}

    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1

    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def benchmark(check_str: str = "") -> None:
    print("\nFor string = ", check_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(check_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(check_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
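

# --- Added example (not in the original file):
#     can_string_be_rearranged_as_palindrome("Momo")         # True  (e.g. "ommo")
#     can_string_be_rearranged_as_palindrome_counter("abc")  # False (three odd counts)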
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = input(
"Enter string to determine if it can be rearranged as a palindrome or not: "
).strip()
benchmark(check_str)
SCREAMING_SNAKE_CASE__ : List[Any] = can_string_be_rearranged_as_palindrome_counter(check_str)
print(F'{check_str} can {"" if status else "not "}be rearranged as a palindrome')
'''simple docstring'''
def apply_table(inp, table):
    """Apply the given permutation table to the input bit-string."""
    res = ''
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Circular left shift by one position."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise XOR of two equal-length bit-strings."""
    res = ''
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    row = int('0b' + data[0] + data[-1], 2)
    col = int('0b' + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    """One Feistel round of simplified DES."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = '0' * (2 - len(l)) + l  # noqa: E741
    r = '0' * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right
if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
from math import sqrt
def UpperCAmelCase ( a_ ) -> bool:
"""simple docstring"""
assert isinstance(a_ , a_ ) and (
number >= 0
), "'number' must been an int and positive"
__A = True
# 0 and 1 are none primes.
if number <= 1:
__A = False
for divisor in range(2 , int(round(sqrt(a_ ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
__A = False
break
# precondition
assert isinstance(a_ , a_ ), "'status' must been from type bool"
return status
def UpperCAmelCase ( a_ ) -> Any:
"""simple docstring"""
assert isinstance(a_ , a_ ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
__A = list(range(2 , n + 1 ) )
__A = [] # this list will be returns.
# actual sieve of erathostenes
for i in range(len(a_ ) ):
for j in range(i + 1 , len(a_ ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
__A = 0
# filters actual prime numbers.
__A = [x for x in begin_list if x != 0]
# precondition
assert isinstance(a_ , a_ ), "'ans' must been from type list"
return ans
def UpperCAmelCase ( a_ ) -> Optional[int]:
"""simple docstring"""
assert isinstance(a_ , a_ ) and (n > 2), "'N' must been an int and > 2"
__A = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(a_ ):
ans.append(a_ )
# precondition
assert isinstance(a_ , a_ ), "'ans' must been from type list"
return ans
def UpperCAmelCase ( a_ ) -> Optional[int]:
"""simple docstring"""
assert isinstance(a_ , a_ ) and number >= 0, "'number' must been an int and >= 0"
__A = [] # this list will be returns of the function.
# potential prime number factors.
__A = 2
__A = number
if number == 0 or number == 1:
ans.append(a_ )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(a_ ):
while quotient != 1:
if is_prime(a_ ) and (quotient % factor == 0):
ans.append(a_ )
quotient /= factor
else:
factor += 1
else:
ans.append(a_ )
# precondition
assert isinstance(a_ , a_ ), "'ans' must been from type list"
return ans
def UpperCAmelCase ( a_ ) -> Any:
"""simple docstring"""
assert isinstance(a_ , a_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
__A = 0
# prime factorization of 'number'
__A = prime_factorization(a_ )
__A = max(a_ )
# precondition
assert isinstance(a_ , a_ ), "'ans' must been from type int"
return ans
def UpperCAmelCase ( a_ ) -> Optional[Any]:
"""simple docstring"""
assert isinstance(a_ , a_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
__A = 0
# prime factorization of 'number'
__A = prime_factorization(a_ )
__A = min(a_ )
# precondition
assert isinstance(a_ , a_ ), "'ans' must been from type int"
return ans
def UpperCAmelCase ( a_ ) -> int:
"""simple docstring"""
assert isinstance(a_ , a_ ), "'number' must been an int"
assert isinstance(number % 2 == 0 , a_ ), "compare bust been from type bool"
return number % 2 == 0
def UpperCAmelCase ( a_ ) -> List[Any]:
"""simple docstring"""
assert isinstance(a_ , a_ ), "'number' must been an int"
assert isinstance(number % 2 != 0 , a_ ), "compare bust been from type bool"
return number % 2 != 0
def UpperCAmelCase ( a_ ) -> Optional[Any]:
"""simple docstring"""
assert (
isinstance(a_ , a_ ) and (number > 2) and is_even(a_ )
), "'number' must been an int, even and > 2"
__A = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
__A = get_prime_numbers(a_ )
__A = len(a_ )
# run variable for while-loops.
__A = 0
__A = None
# exit variable. for break up the loops
__A = True
while i < len_pn and loop:
__A = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
__A = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(a_ , a_ )
and (len(a_ ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def UpperCAmelCase ( a_ , a_ ) -> Optional[Any]:
"""simple docstring"""
assert (
isinstance(a_ , a_ )
and isinstance(a_ , a_ )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
__A = 0
while numbera != 0:
__A = numbera % numbera
__A = numbera
__A = rest
# precondition
assert isinstance(a_ , a_ ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def kg_v(number1, number2) -> int:
    """
    Least common multiple.
    input: two positive integers 'number1' and 'number2'
    returns the least common multiple of 'number1' and 'number2'
    """
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."
    ans = 1  # actual answer that will be returned.
    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)
    count1 = 0
    count2 = 0
    done = []  # captured numbers included in both 'prime_fac_1' and 'prime_fac_2'
    # iterates through prime_fac_1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)
                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)
                for _ in range(count1):
                    ans *= n
            done.append(n)
    # iterates through prime_fac_2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)
            for _ in range(count2):
                ans *= n
            done.append(n)
    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans
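# Hedged aside (editor's sketch, not in the original): the factorization-based
# routine above agrees with the classic identity lcm(a, b) == a * b // gcd(a, b):
def _kg_v_via_gcd(number1, number2):
    return number1 * number2 // gcd(number1, number2)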
def get_prime(n) -> int:
    """
    Gets the n-th prime number.
    input: positive integer 'n' >= 0
    returns the n-th prime number, beginning at index 0
    """
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1
    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"
    return ans
def get_primes_between(p_number_1, p_number_2) -> list:
    """
    input: prime numbers 'p_number_1' and 'p_number_2' with p_number_1 < p_number_2
    returns a list of all prime numbers strictly between the two arguments
    """
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returned.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1
    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1
    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def get_divisors(n) -> list:
    """
    input: positive integer 'n' >= 1
    returns all divisors of n (inclusive 1 and 'n')
    """
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)
    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisors(...)"
    return ans
def is_perfect_number(number) -> bool:
    """
    input: positive integer 'number' > 1
    returns True if 'number' is a perfect number, otherwise False.
    """
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"
    divisors = get_divisors(number)
    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
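# Hedged usage sketch (editor's addition): 6 and 28 are the two smallest perfect
# numbers, since 1 + 2 + 3 == 6 and 1 + 2 + 4 + 7 + 14 == 28.
assert is_perfect_number(6) and is_perfect_number(28)
assert not is_perfect_number(12)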
def simplify_fraction(numerator, denominator) -> tuple:
    """
    input: two integers 'numerator' and 'denominator', denominator != 0
    returns a tuple with the simplified numerator and denominator.
    """
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def factorial(n) -> int:
    """
    input: positive integer 'n'
    returns the factorial of 'n' (n!)
    """
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"
    ans = 1  # this will be returned.
    for factor in range(1, n + 1):
        ans *= factor
    return ans
def fib(n) -> int:
    """
    input: positive integer 'n'
    returns the n-th Fibonacci term, with fib(1) == 1 and fib(2) == 2
    """
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"
    tmp = 0
    fib1 = 1
    ans = 1  # this will be returned
    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp
    return ans
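# Hedged usage sketch (editor's addition): with this indexing,
# fib(1), fib(2), ... == 1, 2, 3, 5, 8, ...
assert [fib(i) for i in range(1, 6)] == [1, 2, 3, 5, 8]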
| 55
| 0
|
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    """
    Handles arguments for zero-shot text classification by turning each possible label into an NLI
    premise/hypothesis pair.
    """

    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels
    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template)
            )

        if isinstance(sequences, str):
            sequences = [sequences]

        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])

        return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    """
    NLI-based zero-shot classification pipeline using a model trained on NLI (natural language inference) tasks.
    """

    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs."
            )
    @property
    def entailment_id(self):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1
    def _parse_and_tokenize(
        self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs
    ):
        """
        Parse arguments and tokenize only_first so that the hypothesis (label) is not truncated.
        """
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`"
            )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs,
                add_special_tokens=add_special_tokens,
                return_tensors=return_tensors,
                padding=padding,
                truncation=truncation,
            )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs,
                    add_special_tokens=add_special_tokens,
                    return_tensors=return_tensors,
                    padding=padding,
                    truncation=TruncationStrategy.DO_NOT_TRUNCATE,
                )
            else:
                raise e
        return inputs
    def _sanitize_parameters(self, **kwargs):
        if kwargs.get("multi_class", None) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers."
            )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params
    def __call__(self, sequences: Union[str, List[str]], *args, **kwargs):
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(f"Unable to understand extra arguments {args}")

        return super().__call__(sequences, **kwargs)
    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)

        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])

            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }
    def _forward(self, inputs):
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)

        model_outputs = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs
    def postprocess(self, model_outputs, multi_label=False):
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))

        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)

        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
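# Hedged usage sketch (editor's addition; the model id is an assumption, any
# NLI-style checkpoint with an "entailment" label works):
#
#     from transformers import pipeline
#
#     classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
#     classifier(
#         "one day I will see the world",
#         candidate_labels=["travel", "cooking", "dancing"],
#         multi_label=True,
#     )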
| 176
|
"""simple docstring"""
from __future__ import annotations
import requests
def get_hackernews_story(story_id: int) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()
def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Get the top max_stories posts from HackerNews - https://news.ycombinator.com/"""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]
def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 176
| 1
|
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )
    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
| 64
|
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(ratio: float = 0.1) -> int:
    """
    Returns the side length of the square spiral for which the ratio of primes
    along both diagonals first falls below `ratio` (Project Euler problem 58).
    """
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
| 671
| 0
|
"""simple docstring"""
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    """Computes and logs the result metrics."""
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    """Normalizes the target text."""
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text
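# Hedged usage sketch (editor's addition): punctuation is stripped and the
# text is lower-cased, e.g.:
assert normalize_text("Hello, WORLD!") == "hello world"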
def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )
        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
)
parser.add_argument(
"--dataset",
type=str,
required=True,
help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
)
parser.add_argument(
"--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
)
parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
parser.add_argument(
"--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
)
parser.add_argument(
"--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
)
parser.add_argument(
"--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
)
parser.add_argument(
"--device",
type=int,
default=None,
help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
)
    args = parser.parse_args()
main(args)
| 477
|
"""simple docstring"""
import numpy as np
def sigmoid(vector: np.array) -> np.array:
    """
    Applies the sigmoid (logistic) function elementwise.

    >>> sigmoid(np.array([-1.0, 1.0, 2.0]))
    array([0.26894142, 0.73105858, 0.88079708])
    """
    return 1 / (1 + np.exp(-vector))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 477
| 1
|
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values
    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )
    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )
    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])

        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 460
|
'''simple docstring'''
from math import sqrt
def _SCREAMING_SNAKE_CASE (A = 1_000_000 ) -> int:
"""simple docstring"""
lowercase__ = 0
lowercase__ = 0
lowercase__ = 42
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(A , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(f"""{solution() = }""")
| 460
| 1
|
"""simple docstring"""
from __future__ import annotations
def median_of_two_arrays(nums_1: list[float], nums_2: list[float]) -> float:
    """
    Finds the median of the merged contents of two arrays.
    """
    all_numbers = sorted(nums_1 + nums_2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
| 714
|
"""simple docstring"""
def solution(limit: int = 1_000_000) -> int:
    """
    Returns the number under `limit` that generates the longest Collatz
    sequence (Project Euler problem 14).
    """
    pre_counter = 1
    largest_number = 1
    counters = {1: 1}
    for input1 in range(2, limit):
        counter = 0
        number = input1
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if input1 not in counters:
            counters[input1] = counter
        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number
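# Hedged aside (editor's sketch, not in the original): the same memoization
# written as a standalone helper for a single chain length; the mutable default
# dict is used deliberately as a cross-call cache here.
def _collatz_length(number: int, cache={1: 1}) -> int:
    if number not in cache:
        nxt = number // 2 if number % 2 == 0 else 3 * number + 1
        cache[number] = 1 + _collatz_length(nxt, cache)
    return cache[number]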
if __name__ == "__main__":
print(solution(int(input().strip())))
| 296
| 0
|
from collections import defaultdict
class AssignmentUsingBitmask:
    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)

        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]

        self.task = defaultdict(list)  # stores the list of persons for each task

        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1
    def count_ways_until(self, mask, task_no):
        # if all persons are included, one valid assignment has been found
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]
        # Number of ways when we don't use this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)
        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)
        # save the value.
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]
    def count_no_of_ways(self, task_performed):
        # store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)
        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)
if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)

    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
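# Hedged note (editor's addition): with M persons and N tasks the memo table has
# 2**M * (N + 1) entries, so time and space are O(2**M * N) up to per-state work.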
| 148
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available

_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 148
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_mobilebert": [
        "MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileBertConfig",
        "MobileBertOnnxConfig",
    ],
    "tokenization_mobilebert": ["MobileBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
        "MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileBertForMaskedLM",
        "MobileBertForMultipleChoice",
        "MobileBertForNextSentencePrediction",
        "MobileBertForPreTraining",
        "MobileBertForQuestionAnswering",
        "MobileBertForSequenceClassification",
        "MobileBertForTokenClassification",
        "MobileBertLayer",
        "MobileBertModel",
        "MobileBertPreTrainedModel",
        "load_tf_weights_in_mobilebert",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
        "TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileBertForMaskedLM",
        "TFMobileBertForMultipleChoice",
        "TFMobileBertForNextSentencePrediction",
        "TFMobileBertForPreTraining",
        "TFMobileBertForQuestionAnswering",
        "TFMobileBertForSequenceClassification",
        "TFMobileBertForTokenClassification",
        "TFMobileBertMainLayer",
        "TFMobileBertModel",
        "TFMobileBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 680
|
'''simple docstring'''
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")
class Tee:
    """
    A helper class to tee print's output into a file.
    Usage: sys.stdout = Tee(filename)
    """

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
def __UpperCAmelCase ( _UpperCAmelCase : int=80 , _UpperCAmelCase : Any=False ) -> Optional[int]:
__snake_case = []
# deal with critical env vars
__snake_case = ["CUDA_VISIBLE_DEVICES"]
for key in env_keys:
__snake_case = os.environ.get(_UpperCAmelCase , _UpperCAmelCase )
if val is not None:
cmd.append(F'''{key}={val}''' )
# python executable (not always needed if the script is executable)
__snake_case = sys.executable if full_python_path else sys.executable.split("/" )[-1]
cmd.append(_UpperCAmelCase )
# now the normal args
cmd += list(map(shlex.quote , sys.argv ) )
# split up into up to MAX_WIDTH lines with shell multi-line escapes
__snake_case = []
__snake_case = ""
while len(_UpperCAmelCase ) > 0:
current_line += F'''{cmd.pop(0 )} '''
if len(_UpperCAmelCase ) == 0 or len(_UpperCAmelCase ) + len(cmd[0] ) + 1 > max_width - 1:
lines.append(_UpperCAmelCase )
__snake_case = ""
return "\\\n".join(_UpperCAmelCase )
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : Any ) -> str:
# Enable to debug everything but the run itself, to do it fast and see the progress.
# This is useful for debugging the output formatting quickly - we can remove it later once
# everybody is happy with the output
if 0:
import random
from time import sleep
sleep(0 )
return dict(
{k: random.uniform(0 , 1_00 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.2222_2222] )} , )
__snake_case = subprocess.run(_UpperCAmelCase , capture_output=_UpperCAmelCase , text=_UpperCAmelCase )
if verbose:
print("STDOUT" , result.stdout )
print("STDERR" , result.stderr )
# save the streams
__snake_case = variation.replace(" " , "-" )
with open(Path(_UpperCAmelCase ) / F'''log.{prefix}.stdout.txt''' , "w" ) as f:
f.write(result.stdout )
with open(Path(_UpperCAmelCase ) / F'''log.{prefix}.stderr.txt''' , "w" ) as f:
f.write(result.stderr )
if result.returncode != 0:
if verbose:
print("failed" )
return {target_metric_key: nan}
with io.open(F'''{output_dir}/all_results.json''' , "r" , encoding="utf-8" ) as f:
__snake_case = json.load(_UpperCAmelCase )
# filter out just the keys we want
return {k: v for k, v in metrics.items() if k in metric_keys}
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict , ) -> Dict:
__snake_case = []
__snake_case = []
__snake_case = F'''{id}: {variation:<{longest_variation_len}}'''
__snake_case = F'''{preamble}: '''
__snake_case = set(report_metric_keys + [target_metric_key] )
for i in tqdm(range(_UpperCAmelCase ) , desc=_UpperCAmelCase , leave=_UpperCAmelCase ):
__snake_case = process_run_single(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__snake_case = single_run_metrics[target_metric_key]
if not math.isnan(_UpperCAmelCase ):
metrics.append(_UpperCAmelCase )
results.append(_UpperCAmelCase )
outcome += "✓"
else:
outcome += "✘"
__snake_case = F'''\33[2K\r{outcome}'''
if len(_UpperCAmelCase ) > 0:
__snake_case = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
__snake_case = round(mean_metrics[target_metric_key] , 2 )
__snake_case = F'''{outcome} {mean_target}'''
if len(_UpperCAmelCase ) > 1:
results_str += F''' {tuple(round(_UpperCAmelCase , 2 ) for x in results )}'''
print(_UpperCAmelCase )
__snake_case = variation
return mean_metrics
else:
print(_UpperCAmelCase )
return {variation_key: variation, target_metric_key: nan}
def __UpperCAmelCase ( ) -> Optional[int]:
__snake_case = torch.cuda.get_device_properties(torch.device("cuda" ) )
return F'''
Datetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}
Software:
transformers: {transformers.__version__}
torch : {torch.__version__}
cuda : {torch.version.cuda}
python : {platform.python_version()}
Hardware:
{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB
'''
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple ) -> List[Any]:
__snake_case = pd.DataFrame(_UpperCAmelCase )
__snake_case = "variation"
__snake_case = "diff_%"
__snake_case = nan
if base_variation is not None and len(df[df[variation_key] == base_variation] ):
# this may still return nan
__snake_case = df.loc[df[variation_key] == base_variation][target_metric_key].item()
if math.isnan(_UpperCAmelCase ):
# as a fallback, use the minimal value as the sentinel
__snake_case = df.loc[df[target_metric_key] != nan][target_metric_key].min()
# create diff column if possible
if not math.isnan(_UpperCAmelCase ):
__snake_case = df.apply(
lambda _UpperCAmelCase : round(1_00 * (r[target_metric_key] - sentinel_value) / sentinel_value )
if not math.isnan(r[target_metric_key] )
else 0 , axis="columns" , )
# re-order columns
__snake_case = [variation_key, target_metric_key, diff_key, *report_metric_keys]
__snake_case = df.reindex(_UpperCAmelCase , axis="columns" ) # reorder cols
# capitalize
__snake_case = df.rename(str.capitalize , axis="columns" )
# make the cols as narrow as possible
__snake_case = df.rename(lambda _UpperCAmelCase : c.replace("_" , "<br>" ) , axis="columns" )
__snake_case = df.rename(lambda _UpperCAmelCase : c.replace("_" , "\n" ) , axis="columns" )
__snake_case = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results:", df_github.to_markdown(index=_UpperCAmelCase , floatfmt=".2f" )]
report += ["```"]
report += ["*** Setup:", get_versions()]
report += ["*** The benchmark command line was:", get_original_command()]
report += ["```"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results (console):", df_console.to_markdown(index=_UpperCAmelCase , floatfmt=".2f" )]
print("\n\n".join(_UpperCAmelCase ) )
def __UpperCAmelCase ( ) -> Dict:
__snake_case = argparse.ArgumentParser()
parser.add_argument(
"--base-cmd" , default=_UpperCAmelCase , type=_UpperCAmelCase , required=_UpperCAmelCase , help="Base cmd" , )
parser.add_argument(
"--variations" , default=_UpperCAmelCase , type=_UpperCAmelCase , nargs="+" , required=_UpperCAmelCase , help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'" , )
parser.add_argument(
"--base-variation" , default=_UpperCAmelCase , type=_UpperCAmelCase , help="Baseline variation to compare to. if None the minimal target value will be used to compare against" , )
parser.add_argument(
"--target-metric-key" , default=_UpperCAmelCase , type=_UpperCAmelCase , required=_UpperCAmelCase , help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second" , )
parser.add_argument(
"--report-metric-keys" , default="" , type=_UpperCAmelCase , help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples" , )
parser.add_argument(
"--repeat-times" , default=1 , type=_UpperCAmelCase , help="How many times to re-run each variation - an average will be reported" , )
parser.add_argument(
"--output_dir" , default="output_benchmark" , type=_UpperCAmelCase , help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked" , )
parser.add_argument(
"--verbose" , default=_UpperCAmelCase , action="store_true" , help="Whether to show the outputs of each run or just the benchmark progress" , )
__snake_case = parser.parse_args()
__snake_case = args.output_dir
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
__snake_case = get_base_command(_UpperCAmelCase , _UpperCAmelCase )
# split each dimension into its --foo variations
__snake_case = [list(map(str.strip , re.split(R"\|" , _UpperCAmelCase ) ) ) for x in args.variations]
# build a cartesian product of dimensions and convert those back into cmd-line arg strings,
# while stripping white space for inputs that were empty
__snake_case = list(map(str.strip , map(" ".join , itertools.product(*_UpperCAmelCase ) ) ) )
__snake_case = max(len(_UpperCAmelCase ) for x in variations )
# split wanted keys
__snake_case = args.report_metric_keys.split()
# capture prints into a log file for convenience
__snake_case = F'''benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt'''
print(F'''\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt''' )
print(F'''and this script\'s output is also piped into {report_fn}''' )
__snake_case = Tee(_UpperCAmelCase )
print(F'''\n*** Running {len(_UpperCAmelCase )} benchmarks:''' )
print(F'''Base command: {" ".join(_UpperCAmelCase )}''' )
__snake_case = "variation"
__snake_case = []
for id, variation in enumerate(tqdm(_UpperCAmelCase , desc="Total completion: " , leave=_UpperCAmelCase ) ):
__snake_case = base_cmd + variation.split()
results.append(
process_run(
id + 1 , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , args.target_metric_key , _UpperCAmelCase , args.repeat_times , _UpperCAmelCase , args.verbose , ) )
process_results(_UpperCAmelCase , args.target_metric_key , _UpperCAmelCase , args.base_variation , _UpperCAmelCase )
if __name__ == "__main__":
main()
| 680
| 1
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}
class CpmTokenizer(PreTrainedTokenizer):
    """Runs pre-tokenization with the Jieba segmentation tool. It is used in CPM models."""

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
return len(self.sp_model )
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = {self.convert_ids_to_tokens(_lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = self.__dict__.copy()
__SCREAMING_SNAKE_CASE : List[Any] = None
return state
def __setstate__( self : Tuple , _A : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
__SCREAMING_SNAKE_CASE : Dict = {}
__SCREAMING_SNAKE_CASE : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase__ ( self : Union[str, Any] , _A : str ):
"""simple docstring"""
if self.remove_space:
__SCREAMING_SNAKE_CASE : List[str] = " ".join(inputs.strip().split() )
else:
__SCREAMING_SNAKE_CASE : Tuple = inputs
__SCREAMING_SNAKE_CASE : Union[str, Any] = outputs.replace('''``''' , '''\"''' ).replace('''\'\'''' , '''\"''' )
if not self.keep_accents:
__SCREAMING_SNAKE_CASE : List[Any] = unicodedata.normalize('''NFKD''' , _lowercase )
__SCREAMING_SNAKE_CASE : List[Any] = "".join([c for c in outputs if not unicodedata.combining(_lowercase )] )
if self.do_lower_case:
__SCREAMING_SNAKE_CASE : Dict = outputs.lower()
return outputs
def UpperCAmelCase__ ( self : List[Any] , _A : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = self.preprocess_text(_lowercase )
__SCREAMING_SNAKE_CASE : Any = self.sp_model.encode(_lowercase , out_type=_lowercase )
__SCREAMING_SNAKE_CASE : Tuple = []
for piece in pieces:
if len(_lowercase ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
__SCREAMING_SNAKE_CASE : str = self.sp_model.EncodeAsPieces(piece[:-1].replace(_lowercase , '''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
__SCREAMING_SNAKE_CASE : List[Any] = cur_pieces[1:]
else:
__SCREAMING_SNAKE_CASE : Dict = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_lowercase )
else:
new_pieces.append(_lowercase )
return new_pieces
def UpperCAmelCase__ ( self : Dict , _A : Tuple ):
"""simple docstring"""
return self.sp_model.PieceToId(_lowercase )
def UpperCAmelCase__ ( self : Optional[int] , _A : List[Any] ):
"""simple docstring"""
return self.sp_model.IdToPiece(_lowercase )
def UpperCAmelCase__ ( self : int , _A : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = "".join(_lowercase ).replace(_lowercase , ''' ''' ).strip()
return out_string
def UpperCAmelCase__ ( self : Any , _A : List[int] , _A : Optional[List[int]] = None ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = [self.sep_token_id]
__SCREAMING_SNAKE_CASE : Tuple = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieves sequence ids from a token list that has no special tokens added.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]
def UpperCAmelCase__ ( self : Optional[Any] , _A : List[int] , _A : Optional[List[int]] = None ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = [self.sep_token_id]
__SCREAMING_SNAKE_CASE : List[Any] = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def UpperCAmelCase__ ( self : Tuple , _A : str , _A : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(_lowercase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
__SCREAMING_SNAKE_CASE : Tuple = os.path.join(
_lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowercase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowercase , '''wb''' ) as fi:
__SCREAMING_SNAKE_CASE : Tuple = self.sp_model.serialized_model_proto()
fi.write(_lowercase )
return (out_vocab_file,)
def UpperCAmelCase__ ( self : List[str] , *_A : int , **_A : Union[str, Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = super()._decode(*_lowercase , **_lowercase )
__SCREAMING_SNAKE_CASE : List[Any] = text.replace(''' ''' , '''''' ).replace('''\u2582''' , ''' ''' ).replace('''\u2583''' , '''\n''' )
return text
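# Illustration (added, not part of the source): the XLNet-style layout built by
# the special-token helpers above appends [SEP] and [CLS] at the END of the
# sequence, unlike BERT. The ids below (sep=4, cls=3) are made up:
sep, cls = [4], [3]
print([10, 11] + sep + cls)                   # single sequence: [10, 11, 4, 3]
print([10, 11] + sep + [20, 21] + sep + cls)  # pair: [10, 11, 4, 20, 21, 4, 3]
print(len([10, 11] + sep) * [0] + [2])        # token type ids: [0, 0, 0, 2]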
"""simple docstring"""
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(F"Building PyTorch model from configuration: {config}" )
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model , config , tf_checkpoint_path)
    # Save pytorch-model
    print(F"Save PyTorch model to {pytorch_dump_path}" )
    torch.save(model.state_dict() , pytorch_dump_path)
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--mobilebert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained MobileBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__UpperCAmelCase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
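# Example invocation (added; the script name and paths below are hypothetical
# placeholders, not taken from the source):
#   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./mobilebert/mobilebert.ckpt \
#       --mobilebert_config_file ./mobilebert/config.json \
#       --pytorch_dump_path ./pytorch_mobilebert.bin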
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a__ : Dict = logging.get_logger(__name__)
def _UpperCamelCase ( __A ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ = DPTConfig()
if "large" in checkpoint_url:
UpperCamelCase__ = 1024
UpperCamelCase__ = 4096
UpperCamelCase__ = 24
UpperCamelCase__ = 16
UpperCamelCase__ = [5, 11, 17, 23]
UpperCamelCase__ = [256, 512, 1024, 1024]
UpperCamelCase__ = (1, 384, 384)
if "ade" in checkpoint_url:
UpperCamelCase__ = True
UpperCamelCase__ = 150
UpperCamelCase__ = "huggingface/label-files"
UpperCamelCase__ = "ade20k-id2label.json"
UpperCamelCase__ = json.load(open(cached_download(hf_hub_url(__A , __A , repo_type="dataset" ) ) , "r" ) )
        UpperCamelCase__ = {int(k ): v for k, v in idalabel.items()}
UpperCamelCase__ = idalabel
UpperCamelCase__ = {v: k for k, v in idalabel.items()}
UpperCamelCase__ = [1, 150, 480, 480]
return config, expected_shape
def _UpperCamelCase ( __A ) -> Any:
'''simple docstring'''
UpperCamelCase__ = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
for k in ignore_keys:
state_dict.pop(__A , __A )
def _UpperCamelCase ( __A ) -> int:
'''simple docstring'''
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
UpperCamelCase__ = name.replace("pretrained.model" , "dpt.encoder" )
if "pretrained.model" in name:
UpperCamelCase__ = name.replace("pretrained.model" , "dpt.embeddings" )
if "patch_embed" in name:
UpperCamelCase__ = name.replace("patch_embed" , "patch_embeddings" )
if "pos_embed" in name:
UpperCamelCase__ = name.replace("pos_embed" , "position_embeddings" )
if "attn.proj" in name:
UpperCamelCase__ = name.replace("attn.proj" , "attention.output.dense" )
if "proj" in name and "project" not in name:
UpperCamelCase__ = name.replace("proj" , "projection" )
if "blocks" in name:
UpperCamelCase__ = name.replace("blocks" , "layer" )
if "mlp.fc1" in name:
UpperCamelCase__ = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
UpperCamelCase__ = name.replace("mlp.fc2" , "output.dense" )
if "norm1" in name:
UpperCamelCase__ = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
UpperCamelCase__ = name.replace("norm2" , "layernorm_after" )
if "scratch.output_conv" in name:
UpperCamelCase__ = name.replace("scratch.output_conv" , "head" )
if "scratch" in name:
UpperCamelCase__ = name.replace("scratch" , "neck" )
if "layer1_rn" in name:
UpperCamelCase__ = name.replace("layer1_rn" , "convs.0" )
if "layer2_rn" in name:
UpperCamelCase__ = name.replace("layer2_rn" , "convs.1" )
if "layer3_rn" in name:
UpperCamelCase__ = name.replace("layer3_rn" , "convs.2" )
if "layer4_rn" in name:
UpperCamelCase__ = name.replace("layer4_rn" , "convs.3" )
if "refinenet" in name:
UpperCamelCase__ = int(name[len("neck.refinenet" ) : len("neck.refinenet" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
UpperCamelCase__ = name.replace(F'''refinenet{layer_idx}''' , F'''fusion_stage.layers.{abs(layer_idx-4 )}''' )
if "out_conv" in name:
UpperCamelCase__ = name.replace("out_conv" , "projection" )
if "resConfUnit1" in name:
UpperCamelCase__ = name.replace("resConfUnit1" , "residual_layer1" )
if "resConfUnit2" in name:
UpperCamelCase__ = name.replace("resConfUnit2" , "residual_layer2" )
if "conv1" in name:
UpperCamelCase__ = name.replace("conv1" , "convolution1" )
if "conv2" in name:
UpperCamelCase__ = name.replace("conv2" , "convolution2" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
UpperCamelCase__ = name.replace("pretrained.act_postprocess1.0.project.0" , "neck.reassemble_stage.readout_projects.0.0" )
if "pretrained.act_postprocess2.0.project.0" in name:
UpperCamelCase__ = name.replace("pretrained.act_postprocess2.0.project.0" , "neck.reassemble_stage.readout_projects.1.0" )
if "pretrained.act_postprocess3.0.project.0" in name:
UpperCamelCase__ = name.replace("pretrained.act_postprocess3.0.project.0" , "neck.reassemble_stage.readout_projects.2.0" )
if "pretrained.act_postprocess4.0.project.0" in name:
UpperCamelCase__ = name.replace("pretrained.act_postprocess4.0.project.0" , "neck.reassemble_stage.readout_projects.3.0" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
UpperCamelCase__ = name.replace("pretrained.act_postprocess1.3" , "neck.reassemble_stage.layers.0.projection" )
if "pretrained.act_postprocess1.4" in name:
UpperCamelCase__ = name.replace("pretrained.act_postprocess1.4" , "neck.reassemble_stage.layers.0.resize" )
if "pretrained.act_postprocess2.3" in name:
UpperCamelCase__ = name.replace("pretrained.act_postprocess2.3" , "neck.reassemble_stage.layers.1.projection" )
if "pretrained.act_postprocess2.4" in name:
UpperCamelCase__ = name.replace("pretrained.act_postprocess2.4" , "neck.reassemble_stage.layers.1.resize" )
if "pretrained.act_postprocess3.3" in name:
UpperCamelCase__ = name.replace("pretrained.act_postprocess3.3" , "neck.reassemble_stage.layers.2.projection" )
if "pretrained.act_postprocess4.3" in name:
UpperCamelCase__ = name.replace("pretrained.act_postprocess4.3" , "neck.reassemble_stage.layers.3.projection" )
if "pretrained.act_postprocess4.4" in name:
UpperCamelCase__ = name.replace("pretrained.act_postprocess4.4" , "neck.reassemble_stage.layers.3.resize" )
if "pretrained" in name:
UpperCamelCase__ = name.replace("pretrained" , "dpt" )
if "bn" in name:
UpperCamelCase__ = name.replace("bn" , "batch_norm" )
if "head" in name:
UpperCamelCase__ = name.replace("head" , "head.head" )
if "encoder.norm" in name:
UpperCamelCase__ = name.replace("encoder.norm" , "layernorm" )
if "auxlayer" in name:
UpperCamelCase__ = name.replace("auxlayer" , "auxiliary_head.head" )
return name
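# Illustration (added; the key below is made up): the rules above compose, e.g.
#   "pretrained.model.blocks.0.attn.proj.weight"
#     -> "dpt.encoder.layer.0.attention.output.dense.weight"
# and refinenet indices are reversed, so "scratch.refinenet4" ends up in
# "neck.fusion_stage.layers.0" via the abs(layer_idx - 4) mapping.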
def _UpperCamelCase ( __A , __A ) -> List[Any]:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCamelCase__ = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.weight''' )
UpperCamelCase__ = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase__ = in_proj_weight[: config.hidden_size, :]
UpperCamelCase__ = in_proj_bias[: config.hidden_size]
UpperCamelCase__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCamelCase__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCamelCase__ = in_proj_weight[
-config.hidden_size :, :
]
UpperCamelCase__ = in_proj_bias[-config.hidden_size :]
def _UpperCamelCase ( ) -> Dict:
'''simple docstring'''
UpperCamelCase__ = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCamelCase__ = Image.open(requests.get(__A , stream=__A ).raw )
return im
@torch.no_grad()
def _UpperCamelCase ( __A , __A , __A , __A ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ = get_dpt_config(__A )
# load original state_dict from URL
UpperCamelCase__ = torch.hub.load_state_dict_from_url(__A , map_location="cpu" )
# remove certain keys
remove_ignore_keys_(__A )
# rename keys
for key in state_dict.copy().keys():
UpperCamelCase__ = state_dict.pop(__A )
UpperCamelCase__ = val
# read in qkv matrices
read_in_q_k_v(__A , __A )
# load HuggingFace model
UpperCamelCase__ = DPTForSemanticSegmentation(__A ) if "ade" in checkpoint_url else DPTForDepthEstimation(__A )
model.load_state_dict(__A )
model.eval()
# Check outputs on an image
UpperCamelCase__ = 480 if "ade" in checkpoint_url else 384
UpperCamelCase__ = DPTImageProcessor(size=__A )
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(__A , return_tensors="pt" )
# forward pass
UpperCamelCase__ = model(**__A ).logits if "ade" in checkpoint_url else model(**__A ).predicted_depth
# Assert logits
UpperCamelCase__ = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]] )
if "ade" in checkpoint_url:
UpperCamelCase__ = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]] )
assert outputs.shape == torch.Size(__A )
assert (
torch.allclose(outputs[0, 0, :3, :3] , __A , atol=1E-4 )
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3] , __A )
)
Path(__A ).mkdir(exist_ok=__A )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(__A )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__A )
if push_to_hub:
print("Pushing model to hub..." )
model.push_to_hub(
repo_path_or_name=Path(__A , __A ) , organization="nielsr" , commit_message="Add model" , use_temp_dir=__A , )
image_processor.push_to_hub(
repo_path_or_name=Path(__A , __A ) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=__A , )
if __name__ == "__main__":
a__ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
a__ : Union[str, Any] = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
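# Example invocation (added; the script name and dump path are hypothetical
# placeholders - only the default checkpoint URL comes from the argparse setup above):
#   python convert_dpt_checkpoint.py \
#       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-large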
'''simple docstring'''
import os
from pathlib import Path
def _UpperCamelCase ( ) -> Tuple:
'''simple docstring'''
from torch.utils.cpp_extension import load
    UpperCamelCase__ = Path(__file__ ).resolve().parent.parent.parent / "kernels" / "deformable_detr"
UpperCamelCase__ = [
root / filename
for filename in [
"vision.cpp",
os.path.join("cpu" , "ms_deform_attn_cpu.cpp" ),
os.path.join("cuda" , "ms_deform_attn_cuda.cu" ),
]
]
load(
"MultiScaleDeformableAttention" , __A , with_cuda=__A , extra_include_paths=[str(__A )] , extra_cflags=["-DWITH_CUDA=1"] , extra_cuda_cflags=[
"-DCUDA_HAS_FP16=1",
"-D__CUDA_NO_HALF_OPERATORS__",
"-D__CUDA_NO_HALF_CONVERSIONS__",
"-D__CUDA_NO_HALF2_OPERATORS__",
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
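# Usage sketch (added; assumes a working CUDA toolchain and the bundled kernel
# sources - in transformers this loader is exposed as load_cuda_kernels):
#   MSDA = load_cuda_kernels()
#   # the compiled extension then provides the fused multi-scale deformable
#   # attention forward/backward ops consumed by the Deformable DETR layers.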
"""simple docstring"""
from queue import PriorityQueue
from typing import Any
import numpy as np
def __lowerCAmelCase ( __UpperCamelCase : dict , __UpperCamelCase : str , __UpperCamelCase : set , __UpperCamelCase : set , __UpperCamelCase : dict , __UpperCamelCase : dict , __UpperCamelCase : PriorityQueue , __UpperCamelCase : dict , __UpperCamelCase : float | int , ):
'''simple docstring'''
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
snake_case_ : List[Any] = cst_fwd.get(__UpperCamelCase , np.inf )
snake_case_ : Optional[int] = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
snake_case_ : List[str] = new_cost_f
snake_case_ : int = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
snake_case_ : Optional[int] = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : str , __UpperCamelCase : dict , __UpperCamelCase : dict ):
'''simple docstring'''
snake_case_ : List[Any] = -1
snake_case_ : List[Any] = set()
snake_case_ : Union[str, Any] = set()
snake_case_ : List[str] = {source: 0}
snake_case_ : Optional[int] = {destination: 0}
snake_case_ : List[Any] = {source: None}
snake_case_ : str = {destination: None}
snake_case_ : PriorityQueue[Any] = PriorityQueue()
snake_case_ : PriorityQueue[Any] = PriorityQueue()
snake_case_ : List[str] = np.inf
queue_forward.put((0, source) )
queue_backward.put((0, destination) )
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
snake_case_ , snake_case_ : Dict = queue_forward.get()
visited_forward.add(__UpperCamelCase )
snake_case_ , snake_case_ : Dict = queue_backward.get()
visited_backward.add(__UpperCamelCase )
snake_case_ : Optional[Any] = pass_and_relaxation(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )
snake_case_ : int = pass_and_relaxation(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
snake_case_ : List[str] = shortest_distance
return shortest_path_distance
__lowerCAmelCase : Union[str, Any] = {
'''B''': [['''C''', 1]],
'''C''': [['''D''', 1]],
'''D''': [['''F''', 1]],
'''E''': [['''B''', 1], ['''G''', 2]],
'''F''': [],
'''G''': [['''F''', 1]],
}
__lowerCAmelCase : Tuple = {
'''B''': [['''E''', 1]],
'''C''': [['''B''', 1]],
'''D''': [['''C''', 1]],
'''F''': [['''D''', 1], ['''G''', 1]],
'''E''': [[None, np.inf]],
'''G''': [['''E''', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
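# Worked example (added; the function and graph names are obfuscated above, so
# assume the entry point is bidirectional_dij(source, destination, fwd, bwd)):
# the shortest E -> F distance in the example graphs is 3, via E -> G -> F
# (2 + 1), which beats E -> B -> C -> D -> F (1 + 1 + 1 + 1 = 4).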
"""simple docstring"""
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
__lowerCAmelCase : Tuple = '''scheduler_config.json'''
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = 1
_lowerCamelCase = 2
_lowerCamelCase = 3
_lowerCamelCase = 4
_lowerCamelCase = 5
@dataclass
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = 42
class _lowerCAmelCase :
"""simple docstring"""
_lowerCamelCase = SCHEDULER_CONFIG_NAME
_lowerCamelCase = ['''dtype''']
_lowerCamelCase = []
_lowerCamelCase = True
@classmethod
def UpperCAmelCase__ ( cls , _lowercase = None , _lowercase = None , _lowercase=False , **_lowercase , ) -> Any:
'''simple docstring'''
snake_case_ , snake_case_ : int = cls.load_config(
pretrained_model_name_or_path=_lowercase , subfolder=_lowercase , return_unused_kwargs=_lowercase , **_lowercase , )
snake_case_ , snake_case_ : Dict = cls.from_config(_lowercase , return_unused_kwargs=_lowercase , **_lowercase )
if hasattr(_lowercase , """create_state""" ) and getattr(_lowercase , """has_state""" , _lowercase ):
snake_case_ : Any = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def UpperCAmelCase__ ( self , _lowercase , _lowercase = False , **_lowercase ) -> Optional[Any]:
'''simple docstring'''
self.save_config(save_directory=_lowercase , push_to_hub=_lowercase , **_lowercase )
@property
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
return self._get_compatibles()
@classmethod
def UpperCAmelCase__ ( cls ) -> Dict:
'''simple docstring'''
snake_case_ : Union[str, Any] = list(set([cls.__name__] + cls._compatibles ) )
snake_case_ : str = importlib.import_module(__name__.split(""".""" )[0] )
snake_case_ : Optional[int] = [
getattr(_lowercase , _lowercase ) for c in compatible_classes_str if hasattr(_lowercase , _lowercase )
]
return compatible_classes
def __lowerCAmelCase ( __UpperCamelCase : jnp.ndarray , __UpperCamelCase : Tuple[int] ):
'''simple docstring'''
assert len(__UpperCamelCase ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(__UpperCamelCase ) - x.ndim) ) , __UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : Any=0.999 , __UpperCamelCase : Optional[int]=jnp.floataa ):
'''simple docstring'''
def alpha_bar(__UpperCamelCase : Optional[int] ):
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2
snake_case_ : Optional[Any] = []
for i in range(__UpperCamelCase ):
snake_case_ : Dict = i / num_diffusion_timesteps
snake_case_ : Union[str, Any] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(__UpperCamelCase ) / alpha_bar(__UpperCamelCase ) , __UpperCamelCase ) )
return jnp.array(__UpperCamelCase , dtype=__UpperCamelCase )
@flax.struct.dataclass
class _lowerCAmelCase :
"""simple docstring"""
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 42
@classmethod
def UpperCAmelCase__ ( cls , _lowercase ) -> int:
'''simple docstring'''
snake_case_ : Any = scheduler.config
if config.trained_betas is not None:
snake_case_ : Optional[Any] = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
snake_case_ : int = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
snake_case_ : str = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
snake_case_ : int = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
f'beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}' )
snake_case_ : Optional[Any] = 1.0 - betas
snake_case_ : Any = jnp.cumprod(_lowercase , axis=0 )
return cls(
alphas=_lowercase , betas=_lowercase , alphas_cumprod=_lowercase , )
def __lowerCAmelCase ( __UpperCamelCase : CommonSchedulerState , __UpperCamelCase : jnp.ndarray , __UpperCamelCase : jnp.ndarray , __UpperCamelCase : jnp.ndarray ):
'''simple docstring'''
snake_case_ : Tuple = state.alphas_cumprod
snake_case_ : Optional[int] = alphas_cumprod[timesteps] ** 0.5
snake_case_ : Dict = sqrt_alpha_prod.flatten()
snake_case_ : int = broadcast_to_shape_from_left(__UpperCamelCase , original_samples.shape )
snake_case_ : Optional[Any] = (1 - alphas_cumprod[timesteps]) ** 0.5
snake_case_ : Dict = sqrt_one_minus_alpha_prod.flatten()
snake_case_ : Tuple = broadcast_to_shape_from_left(__UpperCamelCase , original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def __lowerCAmelCase ( __UpperCamelCase : CommonSchedulerState , __UpperCamelCase : jnp.ndarray , __UpperCamelCase : jnp.ndarray , __UpperCamelCase : jnp.ndarray ):
'''simple docstring'''
snake_case_ , snake_case_ : str = get_sqrt_alpha_prod(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
snake_case_ : Any = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def __lowerCAmelCase ( __UpperCamelCase : CommonSchedulerState , __UpperCamelCase : jnp.ndarray , __UpperCamelCase : jnp.ndarray , __UpperCamelCase : jnp.ndarray ):
'''simple docstring'''
snake_case_ , snake_case_ : List[Any] = get_sqrt_alpha_prod(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
snake_case_ : Any = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
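# Sketch of the forward-diffusion math implemented above (added, illustrative only):
# for a timestep t, the add-noise helper computes
#   x_t = sqrt(alphabar_t) * x_0 + sqrt(1 - alphabar_t) * eps
# and the velocity-prediction target is
#   v_t = sqrt(alphabar_t) * eps - sqrt(1 - alphabar_t) * x_0
# with both coefficients broadcast from shape (batch,) up to the sample shape.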
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class _SCREAMING_SNAKE_CASE ( __snake_case ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE =42
_SCREAMING_SNAKE_CASE =42
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
"""simple docstring"""
def excel_title_to_column(column_title: str) -> int:
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26 , power)
        answer += value
        power += 1
        index -= 1
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
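# Quick illustration (added): the loop above is base-26 positional arithmetic
# with A=1, so "A" -> 1, "Z" -> 26, "AA" -> 27 and "AZ" -> 52.
print(excel_title_to_column("AZ"))  # 52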
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class _lowerCamelCase ( datasets.BeamBasedBuilder ):
def UpperCamelCase_ ( self ) -> Optional[int]:
return datasets.DatasetInfo(
features=datasets.Features({'''content''': datasets.Value('''string''' )} ) , supervised_keys=lowerCAmelCase , )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> Dict:
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_dummy_examples()} )]
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> str:
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(lowerCAmelCase )
class _lowerCamelCase ( datasets.BeamBasedBuilder ):
def UpperCamelCase_ ( self ) -> List[str]:
return datasets.DatasetInfo(
features=datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) , supervised_keys=lowerCAmelCase , )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> List[str]:
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_nested_examples()} )
]
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> List[str]:
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(lowerCAmelCase )
def A__ ( ):
return [(i, {"content": content}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''] )]
def A__ ( ):
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''] )]
class _lowerCamelCase ( UpperCamelCase_ ):
@require_beam
def UpperCamelCase_ ( self ) -> str:
SCREAMING_SNAKE_CASE__: Optional[int]= len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
SCREAMING_SNAKE_CASE__: str= DummyBeamDataset(cache_dir=lowerCAmelCase , beam_runner='''DirectRunner''' )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(lowerCAmelCase , builder.name , '''default''' , '''0.0.0''' , f'{builder.name}-train.arrow' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) )
SCREAMING_SNAKE_CASE__: str= builder.as_dataset()
self.assertEqual(dset['''train'''].num_rows , lowerCAmelCase )
self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , lowerCAmelCase )
self.assertDictEqual(dset['''train'''][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset['''train'''][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(lowerCAmelCase , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
del dset
@require_beam
def UpperCamelCase_ ( self ) -> int:
import apache_beam as beam
SCREAMING_SNAKE_CASE__: Tuple= beam.io.parquetio.WriteToParquet
SCREAMING_SNAKE_CASE__: List[Any]= len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
SCREAMING_SNAKE_CASE__: str= DummyBeamDataset(cache_dir=lowerCAmelCase , beam_runner='''DirectRunner''' )
with patch('''apache_beam.io.parquetio.WriteToParquet''' ) as write_parquet_mock:
SCREAMING_SNAKE_CASE__: Union[str, Any]= partial(lowerCAmelCase , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
lowerCAmelCase , builder.name , '''default''' , '''0.0.0''' , f'{builder.name}-train-00000-of-00002.arrow' ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
lowerCAmelCase , builder.name , '''default''' , '''0.0.0''' , f'{builder.name}-train-00000-of-00002.arrow' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) )
SCREAMING_SNAKE_CASE__: Optional[Any]= builder.as_dataset()
self.assertEqual(dset['''train'''].num_rows , lowerCAmelCase )
self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , lowerCAmelCase )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset['''train''']['''content'''] ) , sorted(['''foo''', '''bar''', '''foobar'''] ) )
self.assertTrue(
os.path.exists(os.path.join(lowerCAmelCase , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
del dset
@require_beam
def UpperCamelCase_ ( self ) -> List[str]:
with tempfile.TemporaryDirectory() as tmp_cache_dir:
SCREAMING_SNAKE_CASE__: List[str]= DummyBeamDataset(cache_dir=lowerCAmelCase )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def UpperCamelCase_ ( self ) -> List[str]:
SCREAMING_SNAKE_CASE__: int= len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
SCREAMING_SNAKE_CASE__: Tuple= NestedBeamDataset(cache_dir=lowerCAmelCase , beam_runner='''DirectRunner''' )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(lowerCAmelCase , builder.name , '''default''' , '''0.0.0''' , f'{builder.name}-train.arrow' ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) )
SCREAMING_SNAKE_CASE__: List[str]= builder.as_dataset()
self.assertEqual(dset['''train'''].num_rows , lowerCAmelCase )
self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , lowerCAmelCase )
self.assertDictEqual(dset['''train'''][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset['''train'''][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(lowerCAmelCase , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
del dset
"""simple docstring"""
def price_plus_tax(price: float, tax_rate: float) -> float:
    '''simple docstring'''
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(F'''{price_plus_tax(100, 0.25) = }''')
print(F'''{price_plus_tax(125.50, 0.05) = }''')
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = '''dandelin/vilt-b32-finetuned-vqa'''
UpperCamelCase_ = (
'''This is a tool that answers a question about an image. It takes an input named `image` which should be the '''
'''image containing the information, as well as a `question` which should be the question in English. It '''
'''returns a text that is the answer to the question.'''
)
UpperCamelCase_ = '''image_qa'''
UpperCamelCase_ = AutoProcessor
UpperCamelCase_ = AutoModelForVisualQuestionAnswering
UpperCamelCase_ = ['''image''', '''text''']
UpperCamelCase_ = ['''text''']
def __init__( self : Optional[int] , *UpperCAmelCase : Any , **UpperCAmelCase : Dict ) -> Dict:
'''simple docstring'''
requires_backends(self , ['''vision'''] )
super().__init__(*UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : Tuple , UpperCAmelCase : "Image" , UpperCAmelCase : str ) -> Tuple:
'''simple docstring'''
return self.pre_processor(UpperCAmelCase , UpperCAmelCase , return_tensors='''pt''' )
def A__ ( self : Dict , UpperCAmelCase : List[str] ) -> Optional[int]:
'''simple docstring'''
with torch.no_grad():
return self.model(**UpperCAmelCase ).logits
def A__ ( self : Dict , UpperCAmelCase : Tuple ) -> List[str]:
'''simple docstring'''
        idx = outputs.argmax(-1 ).item()
        return self.model.config.id2label[idx]
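# Usage sketch (added; the class name is obfuscated above, so assume it is
# exposed as ImageQuestionAnsweringTool, and "photo.jpg" is a placeholder):
#   from PIL import Image
#   tool = ImageQuestionAnsweringTool()
#   print(tool(image=Image.open("photo.jpg"), question="What is in the image?"))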
'''simple docstring'''
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version('torch'))
def compare_versions(library_or_version: Union[str, Version] , operation: str , requirement_version: str ) -> bool:
    """simple docstring"""
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(F'`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}' )
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version , str ):
        library_or_version = parse(importlib.metadata.version(library_or_version ) )
    return operation(library_or_version , parse(requirement_version ) )
def is_torch_version(operation: str , version: str ) -> bool:
    """simple docstring"""
    return compare_versions(torch_version , operation , version )
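# Examples (added; assumes STR_OPERATION_TO_FUNC maps ">=" to operator.ge, as in
# accelerate's constants):
#   compare_versions("packaging", ">=", "20.0")  # True for any recent install
#   is_torch_version(">=", "1.12.0")             # True iff the installed torch >= 1.12.0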
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Optional[int] , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[int] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = params
UpperCAmelCase_ = np.array(_UpperCAmelCase )
UpperCAmelCase_ = np.array([len(_UpperCAmelCase ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self : str , _UpperCAmelCase : Optional[int] ) -> Any:
'''simple docstring'''
return (self.token_ids[index], self.lengths[index])
def __len__( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
return len(self.lengths )
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def lowercase__ ( self : str ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = self.params.max_model_input_size
UpperCAmelCase_ = self.lengths > max_len
logger.info(F"""Splitting {sum(_UpperCAmelCase )} too long sequences.""" )
def divide_chunks(_UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] ):
return [l[i : i + n] for i in range(0 , len(_UpperCAmelCase ) , _UpperCAmelCase )]
UpperCAmelCase_ = []
UpperCAmelCase_ = []
if self.params.mlm:
UpperCAmelCase_ , UpperCAmelCase_ = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
else:
UpperCAmelCase_ , UpperCAmelCase_ = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
UpperCAmelCase_ = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
UpperCAmelCase_ = np.insert(_UpperCAmelCase , 0 , _UpperCAmelCase )
if sub_s[-1] != sep_id:
UpperCAmelCase_ = np.insert(_UpperCAmelCase , len(_UpperCAmelCase ) , _UpperCAmelCase )
assert len(_UpperCAmelCase ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(_UpperCAmelCase )
new_tok_ids.extend(_UpperCAmelCase )
new_lengths.extend([len(_UpperCAmelCase ) for l in sub_seqs] )
UpperCAmelCase_ = np.array(_UpperCAmelCase )
UpperCAmelCase_ = np.array(_UpperCAmelCase )
def lowercase__ ( self : Any ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = len(self )
UpperCAmelCase_ = self.lengths > 11
UpperCAmelCase_ = self.token_ids[indices]
UpperCAmelCase_ = self.lengths[indices]
UpperCAmelCase_ = len(self )
logger.info(F"""Remove {init_size - new_size} too short (<=11 tokens) sequences.""" )
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
if "unk_token" not in self.params.special_tok_ids:
return
else:
UpperCAmelCase_ = self.params.special_tok_ids["unk_token"]
UpperCAmelCase_ = len(self )
UpperCAmelCase_ = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
UpperCAmelCase_ = (unk_occs / self.lengths) < 0.5
UpperCAmelCase_ = self.token_ids[indices]
UpperCAmelCase_ = self.lengths[indices]
UpperCAmelCase_ = len(self )
logger.info(F"""Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).""" )
def lowercase__ ( self : Any ) -> Any:
'''simple docstring'''
if not self.params.is_master:
return
logger.info(F"""{len(self )} sequences""" )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def lowercase__ ( self : Optional[int] , _UpperCAmelCase : Optional[Any] ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = [t[0] for t in batch]
UpperCAmelCase_ = [t[1] for t in batch]
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )
# Max for paddings
UpperCAmelCase_ = max(_UpperCAmelCase )
# Pad token ids
if self.params.mlm:
UpperCAmelCase_ = self.params.special_tok_ids["pad_token"]
else:
UpperCAmelCase_ = self.params.special_tok_ids["unk_token"]
UpperCAmelCase_ = [list(t.astype(_UpperCAmelCase ) ) + [pad_idx] * (max_seq_len_ - len(_UpperCAmelCase )) for t in token_ids]
assert len(tk_ ) == len(_UpperCAmelCase )
assert all(len(_UpperCAmelCase ) == max_seq_len_ for t in tk_ )
UpperCAmelCase_ = torch.tensor(tk_ ) # (bs, max_seq_len_)
UpperCAmelCase_ = torch.tensor(_UpperCAmelCase ) # (bs)
return tk_t, lg_t
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def __UpperCamelCase ( lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : np.ndarray ):
return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(lowerCAmelCase__ , lowerCAmelCase__ ) ) )
def __UpperCamelCase ( lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : np.ndarray ):
if dataset.ndim != value_array.ndim:
__a : Optional[Any] = (
'''Wrong input data\'s dimensions... '''
f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
)
raise ValueError(lowerCAmelCase__ )
try:
if dataset.shape[1] != value_array.shape[1]:
__a : Optional[int] = (
'''Wrong input data\'s shape... '''
f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
)
raise ValueError(lowerCAmelCase__ )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError('''Wrong shape''' )
if dataset.dtype != value_array.dtype:
__a : Tuple = (
'''Input data have different datatype... '''
f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
)
raise TypeError(lowerCAmelCase__ )
__a : Optional[Any] = []
for value in value_array:
__a : Union[str, Any] = euclidean(lowerCAmelCase__ , dataset[0] )
__a : List[Any] = dataset[0].tolist()
for dataset_value in dataset[1:]:
__a : List[str] = euclidean(lowerCAmelCase__ , lowerCAmelCase__ )
if dist > temp_dist:
__a : List[Any] = temp_dist
__a : Optional[Any] = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def __UpperCamelCase ( lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : np.ndarray ):
return np.dot(lowerCAmelCase__ , lowerCAmelCase__ ) / (norm(lowerCAmelCase__ ) * norm(lowerCAmelCase__ ))
if __name__ == "__main__":
import doctest
doctest.testmod()
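# Worked example (added; the function names are obfuscated above, so assume the
# linear nearest-neighbour scan is exposed as similarity_search):
#   dataset = np.array([[0.0, 0.0], [1.0, 1.0]])
#   values = np.array([[0.9, 1.1]])
#   similarity_search(dataset, values)  # -> [[[1.0, 1.0], 0.1414...]]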
def base16_encode(data: bytes ) -> str:
    return "".join([hex(byte )[2:].zfill(2 ).upper() for byte in list(data )] )
def base16_decode(data: str ) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data ) % 2) != 0:
        raise ValueError(
            """Base16 encoded data is invalid:
Data does not have an even number of hex digits.""" )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data ) <= set("""0123456789ABCDEF""" ):
        raise ValueError(
            """Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.""" )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(data ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
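# Quick illustration (added): round-tripping a short byte string.
print(base16_encode(b"Hello"))      # 48656C6C6F
print(base16_decode("48656C6C6F"))  # b'Hello'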
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class __lowercase ( unittest.TestCase ):
    def test_accelerated_optimizer_pickling(self) -> None:
        """simple docstring"""
        model = torch.nn.Linear(1_0 , 1_0 )
        optimizer = torch.optim.SGD(model.parameters() , 0.1 )
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer )
        try:
            pickle.loads(pickle.dumps(optimizer ) )
        except Exception as e:
            self.fail(F"""Accelerated optimizer pickling failed with {e}""" )
        AcceleratorState._reset_state()
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCAmelCase ( __lowercase,unittest.TestCase ):
A__ : List[str] = RoCBertTokenizer
A__ : str = None
A__ : Dict = False
A__ : Dict = True
A__ : Any = filter_non_english
def __UpperCAmelCase ( self : Optional[int] ):
"""simple docstring"""
super().setUp()
_snake_case = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''你''', '''好''', '''是''', '''谁''', '''a''', '''b''', '''c''', '''d''']
_snake_case = {}
_snake_case = {}
for i, value in enumerate(UpperCAmelCase__ ):
_snake_case = i
_snake_case = i
_snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_shape_file'''] )
_snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_pronunciation_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
with open(self.word_shape_file , '''w''' , encoding='''utf-8''' ) as word_shape_writer:
json.dump(UpperCAmelCase__ , UpperCAmelCase__ , ensure_ascii=UpperCAmelCase__ )
with open(self.word_pronunciation_file , '''w''' , encoding='''utf-8''' ) as word_pronunciation_writer:
json.dump(UpperCAmelCase__ , UpperCAmelCase__ , ensure_ascii=UpperCAmelCase__ )
def __UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
_snake_case = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
_snake_case = tokenizer.tokenize('''你好[SEP]你是谁''' )
self.assertListEqual(UpperCAmelCase__ , ['''你''', '''好''', '''[SEP]''', '''你''', '''是''', '''谁'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase__ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase__ ) , [5, 6, 2, 5, 7, 8] )
def __UpperCAmelCase ( self : int ):
"""simple docstring"""
_snake_case = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def __UpperCAmelCase ( self : Optional[int] ):
"""simple docstring"""
_snake_case = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __UpperCAmelCase ( self : Tuple ):
"""simple docstring"""
_snake_case = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
_snake_case = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __UpperCAmelCase ( self : Optional[int] ):
"""simple docstring"""
_snake_case = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __UpperCAmelCase ( self : Any ):
"""simple docstring"""
_snake_case = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __UpperCAmelCase ( self : Tuple ):
"""simple docstring"""
_snake_case = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
_snake_case = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __UpperCAmelCase ( self : Tuple ):
"""simple docstring"""
_snake_case = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def __UpperCAmelCase ( self : str ):
"""simple docstring"""
_snake_case = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
_snake_case = {}
for i, token in enumerate(UpperCAmelCase__ ):
_snake_case = i
_snake_case = RoCBertWordpieceTokenizer(vocab=UpperCAmelCase__ , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def __UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def __UpperCAmelCase ( self : Tuple ):
"""simple docstring"""
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def __UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def __UpperCAmelCase ( self : Dict ):
"""simple docstring"""
        tokenizer = self.get_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
def __UpperCAmelCase ( self : Optional[int] ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_snake_case = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
_snake_case = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
_snake_case = tokenizer_r.encode_plus(
UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , )
_snake_case = tokenizer_r.do_lower_case if hasattr(UpperCAmelCase__ , '''do_lower_case''' ) else False
_snake_case = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), '''Allen'''),
((2_1, 2_3), '''##NL'''),
((2_3, 2_4), '''##P'''),
((2_5, 3_3), '''sentence'''),
((3_3, 3_4), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), '''allen'''),
((2_1, 2_3), '''##nl'''),
((2_3, 2_4), '''##p'''),
((2_5, 3_3), '''sentence'''),
((3_3, 3_4), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def __UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
_snake_case = ['''的''', '''人''', '''有''']
_snake_case = ''''''.join(UpperCAmelCase__ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_snake_case = True
_snake_case = self.tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
_snake_case = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
_snake_case = tokenizer_p.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
_snake_case = tokenizer_r.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
_snake_case = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase__ )
_snake_case = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase__ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
_snake_case = False
_snake_case = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
_snake_case = self.tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
_snake_case = tokenizer_r.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
_snake_case = tokenizer_p.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
_snake_case = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase__ )
_snake_case = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase__ )
# it is expected that only the first Chinese character is not preceded by "##".
_snake_case = [
f"""##{token}""" if idx != 0 else token for idx, token in enumerate(UpperCAmelCase__ )
]
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
@slow
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
_snake_case = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
_snake_case = tokenizer.encode('''你好''' , add_special_tokens=UpperCAmelCase__ )
_snake_case = tokenizer.encode('''你是谁''' , add_special_tokens=UpperCAmelCase__ )
_snake_case = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ )
_snake_case = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ , UpperCAmelCase__ )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def __UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
_snake_case = self.get_tokenizers(do_lower_case=UpperCAmelCase__ )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
_snake_case = '''你好,你是谁'''
_snake_case = tokenizer.tokenize(UpperCAmelCase__ )
_snake_case = tokenizer.convert_tokens_to_ids(UpperCAmelCase__ )
_snake_case = tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase__ )
_snake_case = tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase__ )
_snake_case = tokenizer.prepare_for_model(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
_snake_case = tokenizer.encode_plus(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case =logging.get_logger(__name__)
__snake_case ={
"""bigcode/gpt_bigcode-santacoder""": """https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json""",
}
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : Optional[Any] = '''gpt_bigcode'''
lowerCamelCase : Any = ['''past_key_values''']
lowerCamelCase : List[Any] = {
'''hidden_size''': '''n_embd''',
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : List[str] , UpperCAmelCase__ : List[str]=5_0_2_5_7 , UpperCAmelCase__ : List[str]=1_0_2_4 , UpperCAmelCase__ : Optional[Any]=7_6_8 , UpperCAmelCase__ : int=1_2 , UpperCAmelCase__ : List[str]=1_2 , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : Optional[int]="gelu_pytorch_tanh" , UpperCAmelCase__ : Any=0.1 , UpperCAmelCase__ : Tuple=0.1 , UpperCAmelCase__ : str=0.1 , UpperCAmelCase__ : List[str]=1E-5 , UpperCAmelCase__ : int=0.02 , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Dict=5_0_2_5_6 , UpperCAmelCase__ : Any=5_0_2_5_6 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : Union[str, Any]=True , **UpperCAmelCase__ : int , ) -> List[str]:
lowerCAmelCase = vocab_size
lowerCAmelCase = n_positions
lowerCAmelCase = n_embd
lowerCAmelCase = n_layer
lowerCAmelCase = n_head
lowerCAmelCase = n_inner
lowerCAmelCase = activation_function
lowerCAmelCase = resid_pdrop
lowerCAmelCase = embd_pdrop
lowerCAmelCase = attn_pdrop
lowerCAmelCase = layer_norm_epsilon
lowerCAmelCase = initializer_range
lowerCAmelCase = scale_attn_weights
lowerCAmelCase = use_cache
lowerCAmelCase = attention_softmax_in_fpaa
lowerCAmelCase = scale_attention_softmax_in_fpaa
lowerCAmelCase = multi_query
lowerCAmelCase = bos_token_id
lowerCAmelCase = eos_token_id
super().__init__(bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class __A ( enum.Enum ):
__A = 0
__A = 1
__A = 2
@add_end_docstrings(a )
class __A ( a ):
__A = """
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
"""
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == """tf""" else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
lowerCamelCase =None
if self.model.config.prefix is not None:
lowerCamelCase =self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
lowerCamelCase =self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
lowerCamelCase , lowerCamelCase , lowerCamelCase =self._sanitize_parameters(prefix=UpperCAmelCase_ , **self._forward_params )
lowerCamelCase ={**self._preprocess_params, **preprocess_params}
lowerCamelCase ={**self._forward_params, **forward_params}
    def _sanitize_parameters(self, return_full_text=None, return_tensors=None, return_text=None, return_type=None, clean_up_tokenization_spaces=None, prefix=None, handle_long_generation=None, stop_sequence=None, **generate_kwargs):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework)
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]
        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']")
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim.")
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize(self, *args, **kwargs):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})
        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)
    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework)
        inputs["prompt_text"] = prompt_text
        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length")
                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]
        return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")
        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length
        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence, skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces,)
                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces,))
                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]
                record = {"generated_text": all_text}
            records.append(record)
        return records
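# A hedged usage sketch for the pipeline above. It assumes the standard
# `pipeline` factory from transformers; the checkpoint name is illustrative:
#
#   from transformers import pipeline
#   generator = pipeline("text-generation", model="gpt2")
#   out = generator("Hello, world", max_new_tokens=5, return_full_text=False)
#   print(out[0]["generated_text"])  # only the newly generated continuation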
| 269
|
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
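# Note: sys.exit(bad_files) uses the number of offending files as the process
# exit status, so any nonzero count fails the CI job that runs this script.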
| 269
| 1
|
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    """Build and simulate a quantum Fourier transform circuit on the given number of qubits."""
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be a integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")
    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")
    quantum_circuit = QuantumCircuit(qr, cr)
    counter = number_of_qubits
    for i in range(counter):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)
    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10_000)
    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(
f'''Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'''
)
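# Sanity check (assuming a noiseless qasm_simulator): the QFT of |000> is an
# equal superposition over all 2**3 = 8 basis states, so each key in the
# returned counts should occur with probability ~1/8, i.e. ~1250 of 10000 shots.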
| 14
|
'''simple docstring'''
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)
DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
def generate_summaries_or_translations(examples: List[str], out_file: str, model_name: str, batch_size: int = 8, device: str = DEFAULT_DEVICE, fp16=False, task="summarization", prefix=None, **generate_kwargs) -> Dict:
    """Save model.generate results to <out_file>, and return how long it took."""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.
    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids, attention_mask=batch.attention_mask, **generate_kwargs,)
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def run_generate(verbose=True):
    """Parse CLI args, run generation, and (optionally) score the outputs."""
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples")
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all.")
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info", nargs="?", type=str, const=datetime_now(), help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),)
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples, args.save_path, args.model_name, batch_size=args.bs, device=args.device, fp16=args.fp16, task=args.task, prefix=args.prefix, **parsed_args,)
    if args.reference_path is None:
        return {}
    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)
    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info
    if verbose:
        print(scores)
    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))
    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 448
| 0
|
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    """Image classification pipeline using any `AutoModelForImageClassification`."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 700
|
import os


def solution() -> int:
    """Find the greatest product of four adjacent numbers in the 20x20 grid."""
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])
    maximum = 0
    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp
    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp
    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp
    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
print(solution())
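# Why the loop bounds work: a product of four cells starting at column j needs
# j + 3 <= 19, hence range(17); the second diagonal iterates j in range(3, 20)
# so that j - 3 >= 0 keeps every factor inside the 20x20 grid.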
| 578
| 0
|
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None,):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, image_size=self.image_size,)

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),)

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as ResNet does not use
    input_ids, inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False
    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4],)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 58
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 684
| 0
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
    AutoModelForSeq2SeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},)
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."})
    task: Optional[str] = field(
        default="summarization", metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},)
    max_source_length: Optional[int] = field(
        default=1024, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },)
    max_target_length: Optional[int] = field(
        default=128, metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },)
    val_max_target_length: Optional[int] = field(
        default=142, metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },)
    test_max_target_length: Optional[int] = field(
        default=142, metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },)
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True, metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},)
def handle_metrics(split, metrics, output_dir):
    """Log and save metrics for the given split."""
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,)
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED), training_args.fp16,)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir,)
    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir,)
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path, from_tf=".ckpt" in model_args.model_name_or_path, config=config, cache_dir=model_args.cache_dir,)

    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())

    dataset_class = Seq2SeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer, type_path="train", data_dir=data_args.data_dir, n_obs=data_args.n_train, max_target_length=data_args.max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "",)
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer, type_path="val", data_dir=data_args.data_dir, n_obs=data_args.n_val, max_target_length=data_args.val_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "",)
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer, type_path="test", data_dir=data_args.data_dir, n_obs=data_args.n_test, max_target_length=data_args.test_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "",)
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model, args=training_args, data_args=data_args, train_dataset=train_dataset, eval_dataset=eval_dataset, data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores), compute_metrics=compute_metrics_fn, tokenizer=tokenizer,)
    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train
        trainer.save_model()  # this also saves the tokenizer
        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))
            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)
        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")
        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test
        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)
            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True)
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 355
|
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""enhancement""",
"""new pipeline/model""",
"""new scheduler""",
"""wip""",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
if __name__ == "__main__":
main()
| 355
| 1
|
def odd_even_transposition(arr: list) -> list:
    """Sort a list in place with the odd-even transposition (brick) sort."""
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr
if __name__ == "__main__":
arr = list(range(10, 0, -1))
print(f"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
| 97
|
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple:
    """Compute the resonant frequency of an LC circuit: f0 = 1 / (2*pi*sqrt(L*C))."""
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 97
| 1
|
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(R"^(?P<major>\d+)" R"\.(?P<minor>\d+)" R"\.(?P<patch>\d+)$")
@total_ordering
@dataclass
class Version:
    """Dataset version identifier MAJOR.MINOR.PATCH."""

    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str


def _str_to_version_tuple(version_str):
    """Return the (major, minor, patch) tuple extracted from the version string."""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    """Return the string version from the (major, minor, patch) tuple."""
    return ".".join(str(v) for v in version_tuple)
| 627
|
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class __lowercase :
def __init__( self : str , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any]=1_3 , __lowerCamelCase : Optional[Any]=3_0 , __lowerCamelCase : Any=2 , __lowerCamelCase : List[str]=3 , __lowerCamelCase : str=True , __lowerCamelCase : str=True , __lowerCamelCase : Union[str, Any]=3_2 , __lowerCamelCase : Optional[int]=2 , __lowerCamelCase : Optional[int]=4 , __lowerCamelCase : Union[str, Any]=3_7 , __lowerCamelCase : str="gelu" , __lowerCamelCase : str=0.1 , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : str=1_0 , __lowerCamelCase : Union[str, Any]=0.02 , __lowerCamelCase : List[str]=3 , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Any=2 , ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = image_size
UpperCAmelCase = patch_size
UpperCAmelCase = num_channels
UpperCAmelCase = is_training
UpperCAmelCase = use_labels
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = type_sequence_label_size
UpperCAmelCase = initializer_range
UpperCAmelCase = scope
UpperCAmelCase = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
UpperCAmelCase = (image_size // patch_size) ** 2
UpperCAmelCase = num_patches + 2
def _lowercase ( self : Tuple ) -> int:
"""simple docstring"""
UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def _lowercase ( self : Tuple ) -> str:
"""simple docstring"""
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _lowercase ( self : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : Any , __lowerCamelCase : int ) -> Any:
"""simple docstring"""
UpperCAmelCase = TFDeiTModel(config=__lowerCamelCase )
UpperCAmelCase = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = TFDeiTForMaskedImageModeling(config=__lowerCamelCase )
UpperCAmelCase = model(__lowerCamelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase = 1
UpperCAmelCase = TFDeiTForMaskedImageModeling(__lowerCamelCase )
UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase = model(__lowerCamelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _lowercase ( self : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Dict , __lowerCamelCase : int ) -> Any:
"""simple docstring"""
UpperCAmelCase = self.type_sequence_label_size
UpperCAmelCase = TFDeiTForImageClassification(__lowerCamelCase )
UpperCAmelCase = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase = 1
UpperCAmelCase = TFDeiTForImageClassification(__lowerCamelCase )
UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowercase ( self : Tuple ) -> List[str]:
"""simple docstring"""
UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = config_and_inputs
UpperCAmelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class __lowercase ( __snake_case , __snake_case , unittest.TestCase ):
UpperCamelCase = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
UpperCamelCase = (
{
'''feature-extraction''': TFDeiTModel,
'''image-classification''': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def _lowercase ( self : str ) -> str:
"""simple docstring"""
UpperCAmelCase = TFDeiTModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=3_7 )
def _lowercase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def _lowercase ( self : List[Any] ) -> Dict:
"""simple docstring"""
pass
def _lowercase ( self : str ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(__lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , tf.keras.layers.Dense ) )
def _lowercase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(__lowerCamelCase )
UpperCAmelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase = [*signature.parameters.keys()]
UpperCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def _lowercase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _lowercase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__lowerCamelCase )
def _lowercase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
def _lowercase ( self : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Any=False ) -> int:
"""simple docstring"""
UpperCAmelCase = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def _lowercase ( self : Optional[int] ) -> str:
"""simple docstring"""
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = TFDeiTModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def _UpperCamelCase ( ) ->Tuple:
UpperCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class __lowercase ( unittest.TestCase ):
@cached_property
def _lowercase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def _lowercase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase = TFDeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(images=__lowerCamelCase , return_tensors="""tf""" )
# forward pass
UpperCAmelCase = model(**__lowerCamelCase )
# verify the logits
UpperCAmelCase = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
UpperCAmelCase = tf.constant([-1.0_266, 0.1_912, -1.2_861] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
| 627
| 1
|
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))])
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
| 629
|
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class A__ :
@staticmethod
def __UpperCamelCase( *A_ , **A_ ):
'''simple docstring'''
pass
def hashimage(image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class A__ ( unittest.TestCase ):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else []))
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []))
    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        pass
@require_tf
@unittest.skip("Image segmentation not implemented in TF" )
def __UpperCamelCase( self ):
'''simple docstring'''
pass
@slow
@require_torch
def __UpperCamelCase( self ):
'''simple docstring'''
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")
        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.04_44},
{"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0_21},
{"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.01_67},
{"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.01_32},
{"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.00_53},
{"mask": {"hash": "e2d0b7a0b7", "shape": (480, 640)}, "scores": 0.99_67},
{"mask": {"hash": "453c7844bd", "shape": (480, 640)}, "scores": 0.9_93},
{"mask": {"hash": "3d44f2926d", "shape": (480, 640)}, "scores": 0.99_09},
{"mask": {"hash": "64033ddc3f", "shape": (480, 640)}, "scores": 0.98_79},
{"mask": {"hash": "801064ff79", "shape": (480, 640)}, "scores": 0.98_34},
{"mask": {"hash": "6172f276ef", "shape": (480, 640)}, "scores": 0.97_16},
{"mask": {"hash": "b49e60e084", "shape": (480, 640)}, "scores": 0.96_12},
{"mask": {"hash": "a811e775fd", "shape": (480, 640)}, "scores": 0.95_99},
{"mask": {"hash": "a6a8ebcf4b", "shape": (480, 640)}, "scores": 0.95_52},
{"mask": {"hash": "9d8257e080", "shape": (480, 640)}, "scores": 0.95_32},
{"mask": {"hash": "32de6454a8", "shape": (480, 640)}, "scores": 0.95_16},
{"mask": {"hash": "af3d4af2c8", "shape": (480, 640)}, "scores": 0.94_99},
{"mask": {"hash": "3c6db475fb", "shape": (480, 640)}, "scores": 0.94_83},
{"mask": {"hash": "c290813fb9", "shape": (480, 640)}, "scores": 0.94_64},
{"mask": {"hash": "b6f0b8f606", "shape": (480, 640)}, "scores": 0.9_43},
{"mask": {"hash": "92ce16bfdf", "shape": (480, 640)}, "scores": 0.9_43},
{"mask": {"hash": "c749b25868", "shape": (480, 640)}, "scores": 0.94_08},
{"mask": {"hash": "efb6cab859", "shape": (480, 640)}, "scores": 0.93_35},
{"mask": {"hash": "1ff2eafb30", "shape": (480, 640)}, "scores": 0.93_26},
{"mask": {"hash": "788b798e24", "shape": (480, 640)}, "scores": 0.92_62},
{"mask": {"hash": "abea804f0e", "shape": (480, 640)}, "scores": 0.89_99},
{"mask": {"hash": "7b9e8ddb73", "shape": (480, 640)}, "scores": 0.89_86},
{"mask": {"hash": "cd24047c8a", "shape": (480, 640)}, "scores": 0.89_84},
{"mask": {"hash": "6943e6bcbd", "shape": (480, 640)}, "scores": 0.88_73},
{"mask": {"hash": "b5f47c9191", "shape": (480, 640)}, "scores": 0.88_71}
] , )
# fmt: on
@require_torch
@slow
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Dict = "facebook/sam-vit-huge"
UpperCamelCase : str = pipeline("mask-generation" , model=A_ )
UpperCamelCase : str = image_segmenter(
"http://images.cocodataset.org/val2017/000000039769.jpg" , pred_iou_thresh=1 , points_per_batch=256 )
# Shortening by hashing
UpperCamelCase : Tuple = []
for i, o in enumerate(outputs["masks"] ):
new_outupt += [{"mask": mask_to_test_readable(A_ ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(A_ , decimals=4 ) , [
{"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.04_44},
{"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.02_10},
{"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.01_67},
{"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.01_32},
{"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.00_53},
] , )
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
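# Usage sketch for get_results (the directory below is hypothetical; the helper
# simply loads the `all_results.json` file that the training script writes):
#
#   results = get_results("/tmp/glue_run")
#   print(results["eval_accuracy"])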
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class A(TestCasePlus):
    def test_run_glue(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n ./examples/pytorch/text-classification/run_glue.py\n --num_cores=8\n ./examples/pytorch/text-classification/run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --do_train\n --do_eval\n --debug tpu_metrics_debug\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --max_steps=10\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n ".split()
        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)
    def test_trainer_tpu(self):
        import xla_spawn

        testargs = "\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n ".split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class A(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImgaImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8
    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, image_size=64, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=1)
        model = CLIPVisionModel(config)
        return model
    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224)
        return image_processor
    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
"num_attention_heads": 2,
"attention_head_dim": 16,
"embedding_dim": self.time_input_dim,
"num_embeddings": 32,
"embedding_proj_dim": self.text_embedder_hidden_size,
"time_embed_dim": self.time_embed_dim,
"num_layers": 1,
"clip_embed_dim": self.time_input_dim * 2,
"additional_embeddings": 0,
"time_embed_act_fn": "gelu",
"norm_in_type": "layer",
"embedding_proj_norm_type": "layer",
"encoder_hid_proj_type": None,
"added_emb_type": None,
}
        model = PriorTransformer(**model_kwargs)
        return model
    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
"param_shapes": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"d_latent": self.time_input_dim,
"d_hidden": self.renderer_dim,
"n_output": 12,
"background": (
0.1,
0.1,
0.1,
),
}
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp", num_train_timesteps=1024, prediction_type="sample", use_karras_sigmas=True, clip_sample=True, clip_sample_range=1.0)
        components = {
"prior": prior,
"image_encoder": image_encoder,
"image_processor": image_processor,
"renderer": renderer,
"scheduler": scheduler,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
"num_inference_steps": 1,
"frame_size": 32,
"output_type": "np",
}
return inputs
    def test_shap_e_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2, test_max_difference=test_max_difference, relax_max_difference=relax_max_difference)
    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
'''simple docstring'''
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy")
        pipe = ShapEImgaImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            input_image, generator=generator, guidance_scale=3.0, num_inference_steps=64, frame_size=64, output_type="np").images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowercase_ = logging.get_logger(__name__)
class BlipImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size, default_to_square=True)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, do_convert_rgb: bool = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        return encoded_outputs
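# Usage sketch for the processor above (an illustrative sketch, not part of the
# module; it constructs the processor directly instead of loading a checkpoint
# and feeds it a synthetic PIL image):
#
#   from PIL import Image
#   processor = BlipImageProcessor()  # defaults: resize to 384x384, rescale, normalize
#   batch = processor(images=Image.new("RGB", (640, 480)), return_tensors="np")
#   batch["pixel_values"].shape  # -> (1, 3, 384, 384)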
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, out_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, activation_fn: str = "geglu", norm_elementwise_affine: bool = True, double_self_attention: bool = True):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.in_channels = in_channels
        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)
        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, cross_attention_dim=cross_attention_dim, activation_fn=activation_fn, attention_bias=attention_bias, double_self_attention=double_self_attention, norm_elementwise_affine=norm_elementwise_affine)
                for d in range(num_layers)
            ])
        self.proj_out = nn.Linear(inner_dim, in_channels)
    def forward(self, hidden_states, encoder_hidden_states=None, timestep=None, class_labels=None, num_frames=1, cross_attention_kwargs=None, return_dict: bool = True):
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames
        residual = hidden_states
        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)
        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)
        hidden_states = self.proj_in(hidden_states)
        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states, encoder_hidden_states=encoder_hidden_states, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, class_labels=class_labels)
        # 3. Output
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, num_frames, channel)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)
        output = hidden_states + residual
        if not return_dict:
            return (output,)
        return TransformerTemporalModelOutput(sample=output)
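# Shape walk-through for the forward pass above (a worked example, not executed
# code): with batch_frames=8 and num_frames=4, batch_size=2. An input of shape
# (8, C, H, W) is regrouped to (2, 4, C, H, W), flattened to (2*H*W, 4, inner_dim)
# by proj_in so attention runs over the 4 frames at each spatial location, then
# projected back, reshaped to (8, C, H, W) and added to the residual.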
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
A : str = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer):
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"
def __init__( self : List[str] , SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : List[str] ) -> Dict:
'''simple docstring'''
if hparams.sortish_sampler and hparams.gpus > 1:
__snake_case = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training" )
if hparams.sortish_sampler:
raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously" )
super().__init__(_lowerCamelCase , num_labels=_lowerCamelCase , mode=self.mode , **_lowerCamelCase )
use_task_specific_params(self.model , "summarization" )
save_git_info(self.hparams.output_dir )
__snake_case = Path(self.output_dir ) / "metrics.json"
__snake_case = Path(self.output_dir ) / "hparams.pkl"
pickle_save(self.hparams , self.hparams_save_path )
__snake_case = 0
__snake_case = defaultdict(_lowerCamelCase )
__snake_case = self.config.model_type
__snake_case = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size
__snake_case = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
__snake_case = {
"train": self.hparams.n_train,
"val": self.hparams.n_val,
"test": self.hparams.n_test,
}
__snake_case = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
__snake_case = {
"train": self.hparams.max_target_length,
"val": self.hparams.val_max_target_length,
"test": self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], f'''target_lens: {self.target_lens}'''
assert self.target_lens["train"] <= self.target_lens["test"], f'''target_lens: {self.target_lens}'''
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
__snake_case = get_git_info()["repo_sha"]
__snake_case = hparams.num_workers
__snake_case = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , _lowerCamelCase ):
__snake_case = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
__snake_case = self.decoder_start_token_id
__snake_case = (
SeqaSeqDataset if hasattr(self.tokenizer , "prepare_seq2seq_batch" ) else LegacySeqaSeqDataset
)
__snake_case = False
__snake_case = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
__snake_case = self.hparams.eval_max_gen_length
else:
__snake_case = self.model.config.max_length
__snake_case = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , SCREAMING_SNAKE_CASE : Dict[str, torch.Tensor] ) -> Any:
'''simple docstring'''
__snake_case = {
k: self.tokenizer.batch_decode(v.tolist() ) if "mask" not in k else v.shape for k, v in batch.items()
}
save_json(_lowerCamelCase , Path(self.output_dir ) / "text_batch.json" )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / "tok_batch.json" )
__snake_case = True
return readable_batch
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : List[str] ) -> Any:
'''simple docstring'''
return self.model(_lowerCamelCase , **_lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , SCREAMING_SNAKE_CASE : List[int] ) -> str:
'''simple docstring'''
__snake_case = self.tokenizer.batch_decode(
_lowerCamelCase , skip_special_tokens=_lowerCamelCase , clean_up_tokenization_spaces=_lowerCamelCase )
return lmap(str.strip , _lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE : dict ) -> str:
'''simple docstring'''
__snake_case = self.tokenizer.pad_token_id
__snake_case , __snake_case = batch["input_ids"], batch["attention_mask"]
__snake_case = batch["labels"]
if isinstance(self.model , _lowerCamelCase ):
__snake_case = self.model._shift_right(_lowerCamelCase )
else:
__snake_case = shift_tokens_right(_lowerCamelCase , _lowerCamelCase )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
__snake_case = decoder_input_ids
self.save_readable_batch(_lowerCamelCase )
__snake_case = self(_lowerCamelCase , attention_mask=_lowerCamelCase , decoder_input_ids=_lowerCamelCase , use_cache=_lowerCamelCase )
__snake_case = outputs["logits"]
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
__snake_case = nn.CrossEntropyLoss(ignore_index=_lowerCamelCase )
assert lm_logits.shape[-1] == self.vocab_size
__snake_case = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
__snake_case = nn.functional.log_softmax(_lowerCamelCase , dim=-1 )
__snake_case , __snake_case = label_smoothed_nll_loss(
_lowerCamelCase , _lowerCamelCase , self.hparams.label_smoothing , ignore_index=_lowerCamelCase )
return (loss,)
@property
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
return self.tokenizer.pad_token_id
def SCREAMING_SNAKE_CASE_ ( self : Tuple , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
__snake_case = self._step(_lowerCamelCase )
__snake_case = dict(zip(self.loss_names , _lowerCamelCase ) )
# tokens per batch
__snake_case = batch["input_ids"].ne(self.pad ).sum() + batch["labels"].ne(self.pad ).sum()
__snake_case = batch["input_ids"].shape[0]
__snake_case = batch["input_ids"].eq(self.pad ).sum()
__snake_case = batch["input_ids"].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[int] ) -> str:
'''simple docstring'''
return self._generative_step(_lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( self : Dict , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int]="val" ) -> str:
'''simple docstring'''
self.step_count += 1
__snake_case = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
__snake_case = losses["loss"]
__snake_case = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["gen_time", "gen_len"]
}
__snake_case = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
__snake_case = torch.tensor(_lowerCamelCase ).type_as(_lowerCamelCase )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(_lowerCamelCase )
__snake_case = {f'''{prefix}_avg_{k}''': x for k, x in losses.items()}
__snake_case = self.step_count
self.metrics[prefix].append(_lowerCamelCase ) # callback writes this to self.metrics_save_path
__snake_case = flatten_list([x["preds"] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
f'''{prefix}_loss''': loss,
f'''{prefix}_{self.val_metric}''': metric_tensor,
}
    def calc_generative_metrics(self, preds, target) -> Dict:
        return calculate_rouge(preds, target)
def SCREAMING_SNAKE_CASE_ ( self : int , SCREAMING_SNAKE_CASE : dict ) -> Dict:
'''simple docstring'''
__snake_case = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
__snake_case = self.model.generate(
batch["input_ids"] , attention_mask=batch["attention_mask"] , use_cache=_lowerCamelCase , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
__snake_case = (time.time() - ta) / batch["input_ids"].shape[0]
__snake_case = self.ids_to_clean_text(_lowerCamelCase )
__snake_case = self.ids_to_clean_text(batch["labels"] )
__snake_case = self._step(_lowerCamelCase )
__snake_case = dict(zip(self.loss_names , _lowerCamelCase ) )
__snake_case = self.calc_generative_metrics(_lowerCamelCase , _lowerCamelCase )
__snake_case = np.mean(lmap(_lowerCamelCase , _lowerCamelCase ) )
base_metrics.update(gen_time=_lowerCamelCase , gen_len=_lowerCamelCase , preds=_lowerCamelCase , target=_lowerCamelCase , **_lowerCamelCase )
return base_metrics
def SCREAMING_SNAKE_CASE_ ( self : List[str] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Tuple ) -> List[Any]:
'''simple docstring'''
return self._generative_step(_lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , SCREAMING_SNAKE_CASE : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
return self.validation_epoch_end(_lowerCamelCase , prefix="test" )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[Any] ) -> str:
'''simple docstring'''
__snake_case = self.n_obs[type_path]
__snake_case = self.target_lens[type_path]
__snake_case = self.dataset_class(
self.tokenizer , type_path=_lowerCamelCase , n_obs=_lowerCamelCase , max_target_length=_lowerCamelCase , **self.dataset_kwargs , )
return dataset
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : bool = False ) -> Optional[int]:
'''simple docstring'''
__snake_case = self.get_dataset(_lowerCamelCase )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
__snake_case = dataset.make_sortish_sampler(_lowerCamelCase , distributed=self.hparams.gpus > 1 )
return DataLoader(
_lowerCamelCase , batch_size=_lowerCamelCase , collate_fn=dataset.collate_fn , shuffle=_lowerCamelCase , num_workers=self.num_workers , sampler=_lowerCamelCase , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
__snake_case = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
_lowerCamelCase , batch_sampler=_lowerCamelCase , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
_lowerCamelCase , batch_size=_lowerCamelCase , collate_fn=dataset.collate_fn , shuffle=_lowerCamelCase , num_workers=self.num_workers , sampler=_lowerCamelCase , )
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = self.get_dataloader("train" , batch_size=self.hparams.train_batch_size , shuffle=_lowerCamelCase )
return dataloader
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> Tuple:
'''simple docstring'''
return self.get_dataloader("val" , batch_size=self.hparams.eval_batch_size )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
return self.get_dataloader("test" , batch_size=self.hparams.eval_batch_size )
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
parser.add_argument(
"--max_source_length" , default=1_0_2_4 , type=_lowerCamelCase , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--max_target_length" , default=5_6 , type=_lowerCamelCase , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--val_max_target_length" , default=1_4_2 , type=_lowerCamelCase , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--test_max_target_length" , default=1_4_2 , type=_lowerCamelCase , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument("--freeze_encoder" , action="store_true" )
parser.add_argument("--freeze_embeds" , action="store_true" )
parser.add_argument("--sortish_sampler" , action="store_true" , default=_lowerCamelCase )
parser.add_argument("--overwrite_output_dir" , action="store_true" , default=_lowerCamelCase )
parser.add_argument("--max_tokens_per_batch" , type=_lowerCamelCase , default=_lowerCamelCase )
parser.add_argument("--logger_name" , type=_lowerCamelCase , choices=["default", "wandb", "wandb_shared"] , default="default" )
parser.add_argument("--n_train" , type=_lowerCamelCase , default=-1 , required=_lowerCamelCase , help="# examples. -1 means use all." )
parser.add_argument("--n_val" , type=_lowerCamelCase , default=5_0_0 , required=_lowerCamelCase , help="# examples. -1 means use all." )
parser.add_argument("--n_test" , type=_lowerCamelCase , default=-1 , required=_lowerCamelCase , help="# examples. -1 means use all." )
parser.add_argument(
"--task" , type=_lowerCamelCase , default="summarization" , required=_lowerCamelCase , help="# examples. -1 means use all." )
parser.add_argument("--label_smoothing" , type=_lowerCamelCase , default=0.0 , required=_lowerCamelCase )
parser.add_argument("--src_lang" , type=_lowerCamelCase , default="" , required=_lowerCamelCase )
parser.add_argument("--tgt_lang" , type=_lowerCamelCase , default="" , required=_lowerCamelCase )
parser.add_argument("--eval_beams" , type=_lowerCamelCase , default=_lowerCamelCase , required=_lowerCamelCase )
parser.add_argument(
"--val_metric" , type=_lowerCamelCase , default=_lowerCamelCase , required=_lowerCamelCase , choices=["bleu", "rouge2", "loss", None] )
parser.add_argument("--eval_max_gen_length" , type=_lowerCamelCase , default=_lowerCamelCase , help="never generate more than n tokens" )
parser.add_argument("--save_top_k" , type=_lowerCamelCase , default=1 , required=_lowerCamelCase , help="How many checkpoints to save" )
parser.add_argument(
"--early_stopping_patience" , type=_lowerCamelCase , default=-1 , required=_lowerCamelCase , help=(
"-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
" val_check_interval will effect it."
) , )
return parser
class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)
def main(args, model=None) -> SummarizationModule:
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)
    if model is None:
        if "summarization" in args.task:
            model = SummarizationModule(args)
        else:
            model = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")
    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False
    lower_is_better = args.val_metric == "loss"
    trainer = generic_train(
        model, args, logging_callback=SeqaSeqLoggingCallback(), checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better), early_stopping_callback=es_callback, logger=logger)
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model
    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)
    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
A : List[str] = argparse.ArgumentParser()
A : Optional[Any] = pl.Trainer.add_argparse_args(parser)
A : Tuple = SummarizationModule.add_model_specific_args(parser, os.getcwd())
A : int = parser.parse_args()
main(args)
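# Invocation sketch (the script name and argument values below are illustrative
# assumptions, not taken from this file):
#
#   python finetune.py --data_dir ./cnn_dm --model_name_or_path t5-small \
#       --output_dir ./out --do_train --do_predict --task summarization --gpus 1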
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")
    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="closed" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="open" )
issue.remove_from_labels("stale" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
issue.add_to_labels("stale" )
if __name__ == "__main__":
main()
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class MCTCTProcessor(ProcessorMixin):
    feature_extractor_class = "MCTCTFeatureExtractor"
    tokenizer_class = "AutoTokenizer"
    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__(self, *args, **kwargs):
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def pad(self, *args, **kwargs):
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)
        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call.")
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
"""simple docstring"""
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    x_n: float = x0
    x_n1: float = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("float division by zero, could not find root")
        # secant-method update step
        x_n2: float = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5


if __name__ == "__main__":
    print(intersection(f, 3, 3.5))
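# Sanity check (hand-computed): the real root of x**3 - 2*x - 5 is approximately
# 2.0945515, so intersection(f, 3, 3.5) should print a value within 1e-5 of it.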
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/fnet-base": 512,
    "google/fnet-large": 512,
}
SPIECE_UNDERLINE = "▁"
class FNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True, keep_accents=True, unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs)
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
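# Usage sketch (network access assumed; "google/fnet-base" comes from the
# pretrained maps above):
#
#   tok = FNetTokenizerFast.from_pretrained("google/fnet-base")
#   ids = tok.build_inputs_with_special_tokens(tok.encode("hello", add_special_tokens=False))
#   # -> [cls_id] + token ids + [sep_id]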
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class a_(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False
def lowerCAmelCase__ ( self ):
super().setUp()
a_ = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))
        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")
        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")
    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)
        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)
def lowerCAmelCase__ ( self ):
a_ = self.perceiver_tokenizer
a_ = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
a_ = tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors=UpperCAmelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("""input_ids""" , UpperCAmelCase )
self.assertIn("""attention_mask""" , UpperCAmelCase )
self.assertNotIn("""decoder_input_ids""" , UpperCAmelCase )
self.assertNotIn("""decoder_attention_mask""" , UpperCAmelCase )
def lowerCAmelCase__ ( self ):
a_ = self.perceiver_tokenizer
a_ = [
"""Summary of the text.""",
"""Another summary.""",
]
a_ = tokenizer(
text_target=UpperCAmelCase , max_length=32 , padding="""max_length""" , truncation=UpperCAmelCase , return_tensors=UpperCAmelCase )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer
                # takes into account the new value of additional_special_tokens given in the "tokenizer_config.json"
                # and "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir)
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )
    def test_decode_invalid_byte_id(self):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), "�")

    # tokenizer can be instantiated without any pretrained files, so no need for a pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have a vocabulary file
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on the whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass
    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests use invalid tokens for Perceiver, which can only accept one-character
        # strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
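
if __name__ == "__main__":
    # Illustrative sketch, not part of the original test file: the byte-level
    # round trip the tests above exercise, assuming the public
    # "deepmind/language-perceiver" checkpoint can be downloaded.
    from transformers import PerceiverTokenizer

    demo_tokenizer = PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")
    demo_encoded = demo_tokenizer("Unicode €.")
    print(demo_encoded["input_ids"])                          # UTF-8 byte ids framed by [CLS]/[SEP]
    print(demo_tokenizer.decode(demo_encoded["input_ids"]))   # "[CLS]Unicode €.[SEP]"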
| 511
| 0
|
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # For testing, an iterable dataset of random length
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
    def test_batch_sampler_shards_with_no_splits(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size and does not have a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected)
    def test_batch_sampler_shards_with_splits(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
    def test_batch_sampler_shards_with_no_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size and does not have a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
    def test_batch_sampler_shards_with_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
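
if __name__ == "__main__":
    # Illustrative sketch, not part of the original test file: how BatchSamplerShard
    # splits one BatchSampler across two processes, mirroring the assertions above.
    demo_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
    demo_shards = [BatchSamplerShard(demo_sampler, 2, i) for i in range(2)]
    print(list(demo_shards[0]))  # batches seen by process 0: [[0, 1, 2], [6, 7, 8], ...]
    print(list(demo_shards[1]))  # batches seen by process 1: [[3, 4, 5], [9, 10, 11], ...]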
| 2
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "andreasmadsen/efficient_mlm_m0.40": (
        "https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
    ),
}
class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = "roberta-prelayernorm"

    def __init__(
        self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True,
        classifier_dropout=None, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
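
if __name__ == "__main__":
    # Illustrative sketch, not part of the original file: building a small
    # RobertaPreLayerNormConfig; every keyword shown has a default above.
    demo_config = RobertaPreLayerNormConfig(vocab_size=50265, num_hidden_layers=6)
    print(demo_config.model_type)          # "roberta-prelayernorm"
    print(demo_config.num_hidden_layers)   # 6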
| 503
| 0
|
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])
    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError
    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
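
# Illustrative invocation, not part of the original script; the script name and
# file paths below are placeholders:
#
#   python convert_mluke_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path mluke.bin \
#       --metadata_path metadata.json \
#       --entity_vocab_path entity_vocab.jsonl \
#       --pytorch_dump_folder_path ./mluke-base \
#       --model_size base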
| 486
|
from torch import nn
class ClassificationHead(nn.Module):
    """Classification Head for transformer encoders"""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        logits = self.mlp(hidden_state)
        return logits
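
if __name__ == "__main__":
    # Illustrative sketch, not part of the original module: the head maps a
    # batch of pooled embeddings to per-class logits.
    import torch

    demo_head = ClassificationHead(class_size=5, embed_size=768)
    demo_logits = demo_head(torch.randn(2, 768))
    print(demo_logits.shape)  # torch.Size([2, 5])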
| 486
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
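
# Illustrative usage sketch, not part of the original __init__.py: thanks to the
# _LazyModule indirection above, these imports resolve the heavy submodules only
# on first attribute access (assuming torch is installed):
#
#     from transformers.models.bloom import BloomConfig, BloomForCausalLM
#
#     config = BloomConfig(n_layer=2, hidden_size=64, n_head=4)
#     model = BloomForCausalLM(config)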
| 7
|
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/time-series-transformer-tourism-monthly": (
        "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
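
if __name__ == "__main__":
    # Illustrative sketch, not part of the original file: a small forecasting
    # configuration. feature_size is derived in __init__ above; with the default
    # feature settings it is input_size * len(lags_sequence) + 2 = 1 * 7 + 2 = 9.
    demo_config = TimeSeriesTransformerConfig(prediction_length=24, context_length=48)
    print(demo_config.feature_size)   # 9
    print(demo_config.lags_sequence)  # [1, 2, 3, 4, 5, 6, 7]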
| 262
| 0
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 711
|
import argparse
import torch
from transformers import GPT2LMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""roberta""", choices=["""roberta""", """gpt2"""])
parser.add_argument("""--model_name""", default="""roberta-large""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_roberta_048131723.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
    args = parser.parse_args()
if args.model_type == "roberta":
__UpperCAmelCase = RobertaForMaskedLM.from_pretrained(args.model_name)
__UpperCAmelCase = """roberta"""
elif args.model_type == "gpt2":
__UpperCAmelCase = GPTaLMHeadModel.from_pretrained(args.model_name)
__UpperCAmelCase = """transformer"""
__UpperCAmelCase = model.state_dict()
__UpperCAmelCase = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
__UpperCAmelCase = state_dict[F"""{prefix}.{param_name}"""]
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
__UpperCAmelCase = F"""{prefix}.embeddings.{w}.weight"""
__UpperCAmelCase = state_dict[param_name]
for w in ["weight", "bias"]:
__UpperCAmelCase = F"""{prefix}.embeddings.LayerNorm.{w}"""
__UpperCAmelCase = state_dict[param_name]
# Transformer Blocks #
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.h.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.h.{teacher_idx}.{layer}.{w}"
                    ]
            compressed_sd[f"{prefix}.h.{std_idx}.attn.bias"] = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"]
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
                    ]
        std_idx += 1
    # Language Modeling Head #
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[f"{layer}"] = state_dict[f"{layer}"]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f"lm_head.dense.{w}"] = state_dict[f"lm_head.dense.{w}"]
                compressed_sd[f"lm_head.layer_norm.{w}"] = state_dict[f"lm_head.layer_norm.{w}"]
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.ln_f.{w}"] = state_dict[f"{prefix}.ln_f.{w}"]
        compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"]
print(F"""N layers selected for distillation: {std_idx}""")
print(F"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(F"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
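
# Illustrative invocation, not part of the original script; the script name
# "extract.py" and the output path are placeholders:
#
#   python extract.py --model_type roberta --model_name roberta-large \
#       --dump_checkpoint serialization_dir/tf_roberta_048131723.pth --vocab_transform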
| 582
| 0
|