def solution(n: int = 4000000) -> int:
    """Return the sum of the even-valued Fibonacci terms that do not exceed n."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)


if __name__ == "__main__":
    print(f"{solution() = }")
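
# Added usage sketch: expected values for a quick check (4613732 is the
# well-known Project Euler #2 answer for the default 4,000,000 limit):
#
#     >>> solution(10)   # even Fibonacci terms up to 10 are 2 and 8
#     10
#     >>> solution()
#     4613732
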
import unittest

import numpy as np
import torch

from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_full_inference(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
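
# Added usage sketch (not part of the test file above): running the same
# pipeline outside the test harness, assuming the public
# "google/ncsnpp-celebahq-256" checkpoint used by the slow test:
#
#     from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
#
#     unet = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
#     pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler())
#     image = pipe(num_inference_steps=20).images[0]  # PIL image by default
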
from collections import defaultdict


def check_anagrams(first_str: str, second_str: str) -> bool:
    """Check whether the two input strings are anagrams of each other."""
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict = defaultdict(int)

    # For each position, increment the count for the character from the first
    # string and decrement it for the character from the second string
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
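
# Added usage sketch for check_anagrams (case and whitespace are ignored):
#
#     >>> check_anagrams("Silent", "Listen")
#     True
#     >>> check_anagrams("This is a string", "Is this a string")
#     True
#     >>> check_anagrams("There", "Their")
#     False
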
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-200-distilled-600M": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
# fmt: on
class NllbTokenizer(PreTrainedTokenizer):
    """Construct an NLLB tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.legacy_behaviour = legacy_behaviour

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            tokenizer_file=tokenizer_file,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4   |  5   |  6   |   7  |  8   |  9
        # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
        # spm      | '<unk>' | '<s>'   | '</s>' | 'an'    | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting.
        - In legacy mode: no prefix and suffix=[eos, src_lang_code].
        - In default mode: prefix=[src_lang_code] and suffix=[eos].
        """
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting.
        - In legacy mode: no prefix and suffix=[eos, tgt_lang_code].
        - In default mode: prefix=[tgt_lang_code] and suffix=[eos].
        """
        self.cur_lang_code = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
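
# Added usage sketch: in the default (non-legacy) mode the tokenizer above
# encodes text as [src_lang_code] + tokens + [</s>]. Assuming the public
# "facebook/nllb-200-distilled-600M" checkpoint:
#
#     from transformers import NllbTokenizer
#
#     tokenizer = NllbTokenizer.from_pretrained(
#         "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#     )
#     inputs = tokenizer("Hello world", return_tensors="pt")
#     # Generation would then force the target language code via
#     # forced_bos_token_id=tokenizer.convert_tokens_to_ids("fra_Latn"),
#     # which is what _build_translation_inputs above prepares.
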
"""Testing suite for the TensorFlow ViTMAE model."""

from __future__ import annotations

import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module

import numpy as np

from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFViTMAEForPreTraining, TFViTMAEModel

if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor
class TFViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            decoder_hidden_size=self.hidden_size,
            decoder_num_hidden_layers=self.num_hidden_layers,
            decoder_num_attention_heads=self.num_attention_heads,
            decoder_intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTMAEModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = TFViTMAEForPreTraining(config)
        result = model(pixel_values, training=False)
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, training=False)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTMAEModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TFViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_keyword_and_dict_args(self):
        # make the mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            outputs_dict = model(inputs, noise=noise)

            inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
            outputs_keywords = model(**inputs_keywords, noise=noise)
            output_dict = outputs_dict[0].numpy()
            output_keywords = outputs_keywords[0].numpy()

            self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6)

    def test_numpy_arrays_inputs(self):
        # make the mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        def prepare_numpy_arrays(inputs_dict):
            inputs_np_dict = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(v):
                    inputs_np_dict[k] = v.numpy()
                else:
                    inputs_np_dict[k] = np.array(v)

            return inputs_np_dict

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            inputs_np = prepare_numpy_arrays(inputs)

            output_for_dict_input = model(inputs_np, noise=noise)
            output_for_kw_input = model(**inputs_np, noise=noise)
            self.assert_outputs_same(output_for_dict_input, output_for_kw_input)

    def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        tf_noise = tf.constant(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        tf_inputs_dict["noise"] = tf_noise

        super().check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)

    def test_keras_save_load(self):
        # make mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        tf_main_layer_classes = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__),)
            for module_member_name in dir(module)
            if module_member_name.endswith("MainLayer")
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")]
            for module_member in (getattr(module, module_member_name),)
            if isinstance(module_member, type)
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member, "_keras_serializable", False)
        }

        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        noise = tf.convert_to_tensor(noise)
        inputs_dict.update({"noise": noise})

        for main_layer_class in tf_main_layer_classes:
            main_layer = main_layer_class(config)

            symbolic_inputs = {
                name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
            }

            model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
            outputs = model(inputs_dict)

            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname, "keras_model.h5")
                model.save(filepath)
                model = tf.keras.models.load_model(
                    filepath, custom_objects={main_layer_class.__name__: main_layer_class}
                )
                assert isinstance(model, tf.keras.Model)
                after_outputs = model(inputs_dict)
                self.assert_outputs_same(after_outputs, outputs)

    @slow
    def test_save_load(self):
        # make mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)

            if model_class.__name__ == "TFViTMAEModel":
                out_2 = outputs.last_hidden_state.numpy()
                out_2[np.isnan(out_2)] = 0
            else:
                out_2 = outputs.logits.numpy()
                out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=False)
                model = model_class.from_pretrained(tmpdirname)
                after_outputs = model(model_input, noise=noise)

                if model_class.__name__ == "TFViTMAEModel":
                    out_1 = after_outputs["last_hidden_state"].numpy()
                    out_1[np.isnan(out_1)] = 0
                else:
                    out_1 = after_outputs["logits"].numpy()
                    out_1[np.isnan(out_1)] = 0

                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    def test_save_load_config(self):
        # make mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)
            model_config = model.get_config()
            # make sure that returned config is jsonifiable, which is required by keras
            json.dumps(model_config)
            new_model = model_class.from_config(model.get_config())
            # make sure it also accepts a normal config
            _ = model_class.from_config(model.config)
            _ = new_model(model_input)  # Build model
            new_model.set_weights(model.get_weights())
            after_outputs = new_model(model_input, noise=noise)

            self.assert_outputs_same(after_outputs, outputs)

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_determinism(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        model = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        outputs = model(**inputs, noise=noise)

        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768])
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.convert_to_tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        tf.debugging.assert_near(outputs.logits[0, :3, :3], expected_slice, atol=1e-4)
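
# Added usage sketch (not part of the test file above), mirroring the slow
# integration test and assuming the same public "facebook/vit-mae-base" checkpoint:
#
#     from transformers import TFViTMAEForPreTraining, ViTImageProcessor
#
#     image_processor = ViTImageProcessor.from_pretrained("facebook/vit-mae-base")
#     model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")
#     inputs = image_processor(images=prepare_img(), return_tensors="tf")
#     logits = model(**inputs).logits  # shape (1, 196, 768), as asserted above
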
"""Image/Text processor class for OwlViT."""

import warnings
from typing import List

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available


class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
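
# Added usage sketch: preparing text-conditioned detection inputs with the
# processor above, assuming the public "google/owlvit-base-patch32" checkpoint:
#
#     from PIL import Image
#     from transformers import OwlViTProcessor
#
#     processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#     image = Image.open("cat.png")  # hypothetical local file
#     inputs = processor(
#         text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt"
#     )
#     # inputs now holds input_ids, attention_mask and pixel_values, per __call__ above.
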
"""simple docstring"""
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
lowerCAmelCase_: Any = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class a__ ( _a ):
def __init__( self, _UpperCAmelCase = 101 ):
'''simple docstring'''
lowercase__ = length
def __len__( self ):
'''simple docstring'''
return self.length
def __getitem__( self, _UpperCAmelCase ):
'''simple docstring'''
return i
class a__ :
def __call__( self, _UpperCAmelCase ):
'''simple docstring'''
return {"input_ids": torch.tensor(_UpperCAmelCase ), "labels": torch.tensor(_UpperCAmelCase )}
class a__ ( nn.Module ):
def __init__( self ):
'''simple docstring'''
super().__init__()
# Add some (unused) params otherwise DDP will complain.
lowercase__ = nn.Linear(120, 80 )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase=None ):
'''simple docstring'''
if labels is not None:
return torch.tensor(0.0, device=input_ids.device ), input_ids
else:
return input_ids
class a__ ( _a ):
@require_torch_neuroncore
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = F'''--nproc_per_node=2
--master_port={get_torch_dist_unique_port()}
{self.test_file_dir}/test_trainer_distributed.py
'''.split()
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = F'''--output_dir {output_dir}'''.split()
lowercase__ = ["torchrun"] + distributed_args + args
execute_subprocess_async(_UpperCAmelCase, env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
class a__ ( _a ):
@require_torch_multi_gpu
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = F'''--nproc_per_node={torch.cuda.device_count()}
--master_port={get_torch_dist_unique_port()}
{self.test_file_dir}/test_trainer_distributed.py
'''.split()
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = F'''--output_dir {output_dir}'''.split()
lowercase__ = ["torchrun"] + distributed_args + args
execute_subprocess_async(_UpperCAmelCase, env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
lowerCAmelCase_: List[str] = HfArgumentParser((TrainingArguments,))
lowerCAmelCase_: Union[str, Any] = parser.parse_args_into_dataclasses()[0]
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '
F'distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [1_0_1, 4_0, 7]:
lowerCAmelCase_: List[str] = DummyDataset(dataset_length)
def __a ( A ):
'''simple docstring'''
lowercase__ = list(range(len(A ) ) )
lowercase__ = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
"Predictions and/or labels do not match expected results:\n - predictions: "
f'''{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}''' )
return {"success": success}
lowerCAmelCase_: List[str] = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
lowerCAmelCase_: str = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
lowerCAmelCase_: List[Any] = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
lowerCAmelCase_: List[str] = 2
lowerCAmelCase_: Union[str, Any] = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
lowerCAmelCase_: List[Any] = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
lowerCAmelCase_: str = None
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
lowerCAmelCase_: Dict = "pt"
elif is_tf_available():
lowerCAmelCase_: Dict = "tf"
else:
lowerCAmelCase_: str = "jax"
class a__ ( _a , unittest.TestCase ):
snake_case_ = ByTaTokenizer
snake_case_ = False
def snake_case__ ( self ):
'''simple docstring'''
super().setUp()
lowercase__ = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def snake_case__ ( self ):
'''simple docstring'''
return ByTaTokenizer.from_pretrained("google/byt5-small" )
def snake_case__ ( self, **_UpperCAmelCase ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname, **_UpperCAmelCase )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase=False, _UpperCAmelCase=20, _UpperCAmelCase=5 ):
'''simple docstring'''
lowercase__ = []
for i in range(len(_UpperCAmelCase ) ):
try:
lowercase__ = tokenizer.decode([i], clean_up_tokenization_spaces=_UpperCAmelCase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
lowercase__ = list(filter(lambda _UpperCAmelCase : re.match(R"^[ a-zA-Z]+$", t[1] ), _UpperCAmelCase ) )
lowercase__ = list(filter(lambda _UpperCAmelCase : [t[0]] == tokenizer.encode(t[1], add_special_tokens=_UpperCAmelCase ), _UpperCAmelCase ) )
if max_length is not None and len(_UpperCAmelCase ) > max_length:
lowercase__ = toks[:max_length]
if min_length is not None and len(_UpperCAmelCase ) < min_length and len(_UpperCAmelCase ) > 0:
while len(_UpperCAmelCase ) < min_length:
lowercase__ = toks + toks
# toks_str = [t[1] for t in toks]
lowercase__ = [t[0] for t in toks]
# Ensure consistency
lowercase__ = tokenizer.decode(_UpperCAmelCase, clean_up_tokenization_spaces=_UpperCAmelCase )
if " " not in output_txt and len(_UpperCAmelCase ) > 1:
lowercase__ = (
tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=_UpperCAmelCase )
+ " "
+ tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=_UpperCAmelCase )
)
if with_prefix_space:
lowercase__ = " " + output_txt
lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
return output_txt, output_ids
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"] )
lowercase__ = tokenizer(["hi", "I went to the gym", ""] )
self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"] )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = "Unicode €."
lowercase__ = tokenizer(_UpperCAmelCase )
lowercase__ = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded["input_ids"], _UpperCAmelCase )
# decoding
lowercase__ = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase, "Unicode €.</s>" )
lowercase__ = tokenizer("e è é ê ë" )
lowercase__ = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded["input_ids"], _UpperCAmelCase )
# decoding
lowercase__ = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase, "e è é ê ë</s>" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ), "e è é ê ë</s>" )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = ["A long paragraph for summarization.", "Another paragraph for summarization."]
# fmt: off
lowercase__ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
lowercase__ = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, return_tensors=_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
if FRAMEWORK != "jax":
lowercase__ = list(batch.input_ids.numpy()[0] )
else:
lowercase__ = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
self.assertEqual((2, 37), batch.input_ids.shape )
self.assertEqual((2, 37), batch.attention_mask.shape )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = ["A long paragraph for summarization.", "Another paragraph for summarization."]
lowercase__ = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, return_tensors=_UpperCAmelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("input_ids", _UpperCAmelCase )
self.assertIn("attention_mask", _UpperCAmelCase )
self.assertNotIn("decoder_input_ids", _UpperCAmelCase )
self.assertNotIn("decoder_attention_mask", _UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = [
"Summary of the text.",
"Another summary.",
]
lowercase__ = tokenizer(
text_target=_UpperCAmelCase, max_length=32, padding="max_length", truncation=_UpperCAmelCase, return_tensors=_UpperCAmelCase )
self.assertEqual(32, targets["input_ids"].shape[1] )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = ["A long paragraph for summarization. </s>"]
lowercase__ = ["Summary of the text. </s>"]
# fmt: off
lowercase__ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
lowercase__ = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
lowercase__ = tokenizer(_UpperCAmelCase, text_target=_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase, batch["input_ids"][0] )
self.assertEqual(_UpperCAmelCase, batch["labels"][0] )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length, 42 )
# Now let's start the test
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowercase__ = tempfile.mkdtemp()
lowercase__ = " He is very happy, UNwant\u00E9d,running"
lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
lowercase__ = after_tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
shutil.rmtree(_UpperCAmelCase )
lowercase__ = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowercase__ = tempfile.mkdtemp()
lowercase__ = " He is very happy, UNwant\u00E9d,running"
tokenizer.add_tokens(["bim", "bambam"] )
lowercase__ = tokenizer.additional_special_tokens
additional_special_tokens.append("new_additional_special_token" )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
lowercase__ = after_tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length, 42 )
lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase, model_max_length=43 )
self.assertEqual(tokenizer.model_max_length, 43 )
shutil.rmtree(_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase, "special_tokens_map.json" ), encoding="utf-8" ) as json_file:
lowercase__ = json.load(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase, "tokenizer_config.json" ), encoding="utf-8" ) as json_file:
lowercase__ = json.load(_UpperCAmelCase )
lowercase__ = [F'''<extra_id_{i}>''' for i in range(125 )]
lowercase__ = added_tokens_extra_ids + [
"an_additional_special_token"
]
lowercase__ = added_tokens_extra_ids + [
"an_additional_special_token"
]
with open(os.path.join(_UpperCAmelCase, "special_tokens_map.json" ), "w", encoding="utf-8" ) as outfile:
json.dump(_UpperCAmelCase, _UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase, "tokenizer_config.json" ), "w", encoding="utf-8" ) as outfile:
json.dump(_UpperCAmelCase, _UpperCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowercase__ = tokenizer_class.from_pretrained(
_UpperCAmelCase, )
self.assertIn(
"an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["an_additional_special_token"], tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ), )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowercase__ = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=_UpperCAmelCase )]
lowercase__ = tokenizer_class.from_pretrained(
_UpperCAmelCase, additional_special_tokens=_UpperCAmelCase, )
self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens )
self.assertEqual(
["a_new_additional_special_token"], tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ), )
def test_decode_single_bytes(self):
    tokenizer_list = []
    if self.test_slow_tokenizer:
        tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
    if self.test_rust_tokenizer:
        tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
    for tokenizer_class, tokenizer_utils in tokenizer_list:
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer_utils.save_pretrained(tmp_dir)
            tokenizer = tokenizer_class.from_pretrained(tmp_dir)
            self.assertTrue(tokenizer.decode([255]) == "")
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
pass
def test_convert_tokens_to_string_format(self):
    tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
    for tokenizer in tokenizers:
        with self.subTest(f"{tokenizer.__class__.__name__}"):
            tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
            string = tokenizer.convert_tokens_to_string(tokens)
            self.assertIsInstance(string, str)
def test_tokenizers_common_ids_setters(self):
    tokenizers = self.get_tokenizers()
    for tokenizer in tokenizers:
        with self.subTest(f"{tokenizer.__class__.__name__}"):
            attributes_list = [
                "bos_token",
                "eos_token",
                "unk_token",
                "sep_token",
                "pad_token",
                "cls_token",
                "mask_token",
            ]
            token_id_to_test_setters = 0
            token_to_test_setters = tokenizer.convert_ids_to_tokens(
                token_id_to_test_setters, skip_special_tokens=False)
            for attr in attributes_list:
                setattr(tokenizer, attr + "_id", None)
                self.assertEqual(getattr(tokenizer, attr), None)
                self.assertEqual(getattr(tokenizer, attr + "_id"), None)
                setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)
            setattr(tokenizer, "additional_special_tokens_ids", [])
            self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
            self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])
            setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters])
            self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters])
            self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
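# A minimal sketch of the from_pretrained override behaviour exercised by the
# tests above (the `tmp_dir` path is hypothetical; any saved ByT5-style
# tokenizer directory would do):
#
#   tok = tokenizer_class.from_pretrained(tmp_dir)                       # reads additional_special_tokens from disk
#   tok = tokenizer_class.from_pretrained(tmp_dir, model_max_length=43)  # a kwarg overrides the saved config value
#   tok = tokenizer_class.from_pretrained(
#       tmp_dir, additional_special_tokens=[AddedToken("x", lstrip=True)]
#   )                                                                    # replaces the saved special tokens entirely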
| 668
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, apply_ocr=True):
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'apply_ocr'))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'height': 18, 'width': 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {'height': 42, 'width': 42})
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
pass
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
# Initialize image_processing
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=a)
for image in image_inputs:
self.assertIsInstance(a , Image.Image)
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors='pt')
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , a)
self.assertIsInstance(encoding.boxes , a)
# Test batched
SCREAMING_SNAKE_CASE = image_processing(a , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
# Initialize image_processing
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a)
for image in image_inputs:
self.assertIsInstance(a , np.ndarray)
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
SCREAMING_SNAKE_CASE = image_processing(a , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def SCREAMING_SNAKE_CASE__ ( self) -> str:
# Initialize image_processing
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a)
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor)
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
SCREAMING_SNAKE_CASE = image_processing(a , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
# with apply_OCR = True
SCREAMING_SNAKE_CASE = LayoutLMvaImageProcessor()
from datasets import load_dataset
SCREAMING_SNAKE_CASE = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test')
SCREAMING_SNAKE_CASE = Image.open(ds[0]['file']).convert('RGB')
SCREAMING_SNAKE_CASE = image_processing(a , return_tensors='pt')
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224))
self.assertEqual(len(encoding.words) , len(encoding.boxes))
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
SCREAMING_SNAKE_CASE = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
SCREAMING_SNAKE_CASE = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 
803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , a)
self.assertListEqual(encoding.boxes , a)
# with apply_OCR = False
SCREAMING_SNAKE_CASE = LayoutLMvaImageProcessor(apply_ocr=a)
SCREAMING_SNAKE_CASE = image_processing(a , return_tensors='pt')
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224))
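# A short usage sketch of the processor exercised above, assuming `pytesseract`
# is installed and an RGB document scan exists at the hypothetical path
# "document.png":
#
#   from PIL import Image
#   from transformers import LayoutLMvaImageProcessor
#
#   processor = LayoutLMvaImageProcessor()            # apply_ocr=True by default
#   image = Image.open("document.png").convert("RGB")
#   encoding = processor(image, return_tensors="pt")
#   print(encoding.pixel_values.shape)                # (1, 3, 224, 224), as asserted above
#   print(encoding.words, encoding.boxes)             # OCR words and their bounding boxes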
| 73
|
'''simple docstring'''
import csv
import tweepy
# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""
def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)
    # initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)
    # save most recent tweets
    alltweets.extend(new_tweets)
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(
            screen_name=screen_name, count=200, max_id=oldest)
        # save most recent tweets
        alltweets.extend(new_tweets)
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"...{len(alltweets)} tweets downloaded so far")
    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('''FirePing32''')
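# A small follow-up sketch showing how the CSV written above can be read back
# with the standard library (assumes the script has already run for this user):
#
#   import csv
#   with open("new_FirePing32_tweets.csv") as f:
#       for row in csv.DictReader(f):
#           print(row["created_at"], row["text"][:80])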
| 288
| 0
|
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            'dataset', 'sqlite:///' + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_sql_dataset(dataset, expected_features)
@require_sqlalchemy
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / 'cache'
    default_expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader('dataset', 'sqlite:///' + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)
def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute('SELECT * FROM dataset')
        for row in cur:
            yield row
@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / 'cache'
    output_sqlite_path = os.path.join(cache_dir, 'tmp.sql')
    dataset = SqlDatasetReader('dataset', 'sqlite:///' + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, 'dataset', 'sqlite:///' + output_sqlite_path, num_proc=1).write()
    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / 'cache'
    output_sqlite_path = os.path.join(cache_dir, 'tmp.sql')
    dataset = SqlDatasetReader('dataset', 'sqlite:///' + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, 'dataset', 'sqlite:///' + output_sqlite_path, num_proc=2).write()
    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / 'cache'
    output_sqlite_path = os.path.join(cache_dir, 'tmp.sql')
    dataset = SqlDatasetReader('dataset', 'sqlite:///' + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, 'dataset', 'sqlite:///' + output_sqlite_path, num_proc=0).write()
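# A minimal round-trip sketch of the reader/writer pair under test, assuming an
# existing SQLite database at the hypothetical path "data.db" containing a
# table named "dataset":
#
#   from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
#
#   ds = SqlDatasetReader("dataset", "sqlite:///data.db").read()             # SQL table -> Dataset
#   SqlDatasetWriter(ds, "dataset", "sqlite:///copy.db", num_proc=1).write() # Dataset -> new SQL table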
| 702
|
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
lowerCAmelCase__: Optional[int] = "\\n@inproceedings{lin-2004-rouge,\n title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",\n author = \"Lin, Chin-Yew\",\n booktitle = \"Text Summarization Branches Out\",\n month = jul,\n year = \"2004\",\n address = \"Barcelona, Spain\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W04-1013\",\n pages = \"74--81\",\n}\n"
lowerCAmelCase__: Dict = "\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n"
lowerCAmelCase__: List[str] = "\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,\n `\"rougeL\"`: Longest common subsequence based scoring.\n `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric('rouge')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']\n >>> print(results[\"rouge1\"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results[\"rouge1\"].mid.fmeasure)\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ ( datasets.Metric ):
def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
] , )
def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
    if rouge_types is None:
        rouge_types = ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
    scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
    if use_aggregator:
        aggregator = scoring.BootstrapAggregator()
    else:
        scores = []
    for ref, pred in zip(references, predictions):
        score = scorer.score(ref, pred)
        if use_aggregator:
            aggregator.add_scores(score)
        else:
            scores.append(score)
    if use_aggregator:
        result = aggregator.aggregate()
    else:
        result = {}
        for key in scores[0]:
            result[key] = [score[key] for score in scores]
    return result
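# A minimal usage sketch, mirroring the example already given in the
# _KWARGS_DESCRIPTION docstring above:
#
#   import datasets
#   rouge = datasets.load_metric("rouge")
#   results = rouge.compute(predictions=["hello there"], references=["hello there"])
#   print(results["rouge1"].mid.fmeasure)  # 1.0 for identical strings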
| 311
| 0
|
"""simple docstring"""
import re
def dna_complement(dna: str) -> str:
    """
    >>> dna_complement("GCTA")
    'CGAT'
    """
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")
    return dna.translate(dna.maketrans("ATCG", "TAGC"))
if __name__ == "__main__":
import doctest
doctest.testmod()
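# Worked examples for the complement function above:
#
#   dna_complement("ATCG")   # -> "TAGC" (A<->T, C<->G)
#   dna_complement("GTAT")   # -> "CATA"
#   dna_complement("GTAX")   # raises ValueError: Invalid Strand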
| 52
|
'''simple docstring'''
__author__ = "Alexander Joslin"
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    operators = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1: push operands onto the operand stack
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2: push operators onto the operator stack
            operator_stack.push(i)
        elif i == ")":
            # RULE 4: on ")", pop one operator and two operands, apply, push result
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2, num1)
            operand_stack.push(total)
    # RULE 5: the final value on the operand stack is the result
    return operand_stack.peek()
if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
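# Worked trace for the sample equation: each ")" pops one operator and two
# operands and pushes the result, so the inner expressions reduce as
# 4*2=8, 2+3=5, 8*5=40, and finally 5+40=45. Note that the character-by-character
# scan above handles single-digit operands only.
#
#   assert dijkstras_two_stack_algorithm("(5 + ((4 * 2) * (2 + 3)))") == 45
#   assert dijkstras_two_stack_algorithm("(3 + (1 * 4))") == 7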
| 368
| 0
|
"""simple docstring"""
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowerCamelCase__ = logging.get_logger(__name__)
class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ['audio_values', 'audio_mask']

    def __init__(self, spectrogram_length=2048, num_channels=1, patch_size=[16, 16], feature_size=128, sampling_rate=44100, hop_length_to_sampling_rate=86, n_fft=2048, padding_value=0.0, **kwargs):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0, max_frequency=22050.0, sampling_rate=sampling_rate, norm='slaney', mel_scale='slaney').T

    def _np_extract_fbank_features(self, waveform):
        log_spec = spectrogram(
            waveform, window_function(self.n_fft, 'hann'), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel='dB', db_range=80.0)
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , **_SCREAMING_SNAKE_CASE , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'This feature extractor is set to support sampling rate'
f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
f" with {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
__lowerCAmelCase : int = isinstance(_SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}" )
__lowerCAmelCase : int = is_batched_numpy or (
isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__lowerCAmelCase : Union[str, Any] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(_SCREAMING_SNAKE_CASE , np.ndarray ):
__lowerCAmelCase : str = np.asarray(_SCREAMING_SNAKE_CASE , dtype=np.floataa )
elif isinstance(_SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__lowerCAmelCase : Optional[Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__lowerCAmelCase : Optional[Any] = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
__lowerCAmelCase : List[str] = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = [np.asarray(_SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
__lowerCAmelCase : List[Any] = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
__lowerCAmelCase : List[str] = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
__lowerCAmelCase : Optional[Any] = np.array(_SCREAMING_SNAKE_CASE ).astype(np.floataa )
# convert into correct format for padding
__lowerCAmelCase : List[str] = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
__lowerCAmelCase : int = np.ones([len(_SCREAMING_SNAKE_CASE ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
__lowerCAmelCase : Any = padded_audio_features * self.padding_value
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
__lowerCAmelCase : str = audio_features[i]
__lowerCAmelCase : Dict = feature
# return as BatchFeature
if return_attention_mask:
__lowerCAmelCase : Optional[Any] = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
else:
__lowerCAmelCase : Any = {'audio_values': padded_audio_features}
__lowerCAmelCase : Dict = BatchFeature(data=_SCREAMING_SNAKE_CASE , tensor_type=_SCREAMING_SNAKE_CASE )
return encoded_inputs
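# A minimal sketch of running the extractor on dummy audio. This assumes the
# complete upstream implementation of __call__ (the body above still uses its
# original placeholder parameter names):
#
#   import numpy as np
#   extractor = TvltFeatureExtractor()
#   waveform = np.random.randn(44100).astype(np.float32)   # 1 s of fake mono audio
#   batch = extractor(waveform, sampling_rate=44100, return_tensors="np")
#   print(batch["audio_values"].shape, batch["audio_mask"].shape)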
| 549
|
"""simple docstring"""
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """
    Transforms a snake_case string to camelCase (or PascalCase when use_pascal is True).

    >>> snake_to_camel_case("some_random_string")
    'someRandomString'
    >>> snake_to_camel_case("some_random_string", use_pascal=True)
    'SomeRandomString'
    """
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)
    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
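# Additional worked examples for the converter above:
#
#   snake_to_camel_case("foo")                   # -> "foo" (single word stays lowercase in camelCase)
#   snake_to_camel_case("foo", use_pascal=True)  # -> "Foo"
#   snake_to_camel_case(123)                     # raises ValueError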
| 549
| 1
|
"""simple docstring"""
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
A__ : str = datasets.logging.get_logger(__name__)
A__ : Dict = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n'
A__ : Optional[Any] = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n'
A__ : Any = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n'
def get_coref_infos(key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}
    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0
    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)
    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)
    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters
    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)
    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}")
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}")
    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively")
    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)
    output_scores = {}
    conll = 0
    conll_subparts_num = 0
    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})
        logger.info(
            name.ljust(10), f"Recall: {recall * 100:.2f}", f" Precision: {precision * 100:.2f}", f" F1: {f1 * 100:.2f}")
    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})
    return output_scores
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class lowercase__ ( datasets.Metric ):
def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" ) ),
"references": datasets.Sequence(datasets.Value("string" ) ),
} ) , codebase_urls=["https://github.com/ns-moosavi/coval"] , reference_urls=[
"https://github.com/ns-moosavi/coval",
"https://www.aclweb.org/anthology/P16-1060",
"http://www.conll.cemantix.org/2012/data.html",
] , )
def _compute(self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False):
    metrics = [
        ("mentions", evaluator.mentions),
        ("muc", evaluator.muc),
        ("bcub", evaluator.b_cubed),
        ("ceafe", evaluator.ceafe),
        ("lea", evaluator.lea),
    ]
    if min_span:
        has_gold_parse = util.check_gold_parse_annotation(references)
        if not has_gold_parse:
            raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
        # util.parse_key_file(key_file)
        # key_file = key_file + ".parsed"
    score = evaluate(
        key_lines=references, sys_lines=predictions, metrics=metrics, NP_only=NP_only, remove_nested=remove_nested, keep_singletons=keep_singletons, min_span=min_span)
    return score
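# A minimal usage sketch, taken from the docstring example above (requires the
# `coval` package; `words` is the CoNLL-formatted sentence shown there):
#
#   import datasets
#   coval = datasets.load_metric("coval")
#   results = coval.compute(predictions=[words], references=[words])
#   print(results["conll_score"])  # 100.0 when key and system annotations match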
| 153
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"))
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000E00, 5.717E-01, 4.717E-01, 1.000E00, 0.000E00, 1.000E00, 3.000E-04, 0.000E00, 9.000E-04])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1E-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3E-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3E-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3E-3)
@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNetaDModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()
        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2

    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"
        unet = UNetaDModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)
        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
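# A minimal end-to-end sketch of the pipeline exercised above (slow: downloads
# the checkpoint and denoises for 50 steps; names follow the diffusers docs):
#
#   from diffusers import DDIMPipeline
#   pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#   image = pipe(num_inference_steps=50, output_type="pil").images[0]
#   image.save("ddim_sample.png")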
| 153
| 1
|
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(n: int = 1000) -> int:
    triplets = pythagorean_triple(n)
    return triplets.most_common(1)[0][0]
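# Worked check for a small search space: 120 is the smallest perimeter that
# admits three right triangles, (20, 48, 52), (24, 45, 51) and (30, 40, 50),
# so both calls below should hold; with the default limit of 1000 the answer
# is the well-known 840.
#
#   assert pythagorean_triple(120)[120] == 3
#   assert solution(120) == 120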
if __name__ == "__main__":
print(f'''Perimeter {solution()} has maximum solutions''')
| 700
|
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--mobilebert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained MobileBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
SCREAMING_SNAKE_CASE = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
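# Example invocation (the script filename and all paths are hypothetical; the
# checkpoint and config must come from a MobileBERT TF release):
#
#   python convert_mobilebert_tf_checkpoint.py \
#       --tf_checkpoint_path ./mobilebert/mobilebert_variables.ckpt \
#       --mobilebert_config_file ./mobilebert/config.json \
#       --pytorch_dump_path ./mobilebert_pytorch/pytorch_model.bin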
| 23
| 0
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''torch''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> Optional[int]:
requires_backends(self , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Optional[int]:
requires_backends(cls , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> Dict:
requires_backends(cls , ['''torch'''])
class snake_case_ ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''torch''']
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> Optional[int]:
requires_backends(self , ['''torch'''])
@classmethod
def UpperCAmelCase__ ( cls , *lowerCamelCase_ , **lowerCamelCase_) -> List[str]:
requires_backends(cls , ['''torch'''])
    @classmethod
    def UpperCAmelCase__(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


# NOTE: in the original module this placeholder pattern is stamped out once per
# public torch-backed class; the obfuscation collapsed every class name to
# `snake_case_`, so the repeated definitions were verbatim duplicates that
# shadowed one another, and each signature reused the same parameter name
# twice (`*lowerCamelCase_, **lowerCamelCase_`), which is a SyntaxError.
# A single corrected copy of the pattern is kept here. `DummyObject` and
# `requires_backends` are assumed to be imported near the top of the module.
class snake_case_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


# The module likewise defines one placeholder function of this shape per
# public torch-backed helper (seven in this stretch of the original, their
# names also obfuscated away):
def __snake_case(*args, **kwargs):
    requires_backends(__snake_case, ["torch"])
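# A minimal sketch of the machinery behind these placeholders, assuming the
# usual diffusers/transformers dummy-object utilities (illustrative, not the
# exact library source):
#
#     class DummyObject(type):
#         # Any attribute access on the class itself raises the backend error,
#         # so even `SomeModel.from_pretrained(...)` fails with a clear message.
#         def __getattr__(cls, key):
#             requires_backends(cls, cls._backends)
#
#     def requires_backends(obj, backends):
#         name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
#         raise ImportError(f"{name} requires the following backends: {', '.join(backends)}")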
| 34
|
"""simple docstring"""
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    """Raise the error string that `find_executable_batch_size` treats as an OOM."""
    raise RuntimeError("CUDA out of memory.")


class ModelForTest(nn.Module):
    """A tiny model used to exercise the memory utilities."""

    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class MemoryTest(unittest.TestCase):
    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
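# A usage sketch for `find_executable_batch_size` outside the test suite
# (`build_dataloader` and `train_one_epoch` are hypothetical helpers):
#
#     @find_executable_batch_size(starting_batch_size=256)
#     def train(batch_size):
#         loader = build_dataloader(batch_size)
#         train_one_epoch(loader)
#
#     train()  # retries at 256, 128, 64, ... until no OOM-style error is raised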
| 34
| 1
|
"""simple docstring"""
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)
    return key
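# For illustration, `rename_key` maps an original BLIP key onto the
# transformers layout through the chain of substitutions above, e.g.:
#
#     >>> rename_key("visual_encoder.blocks.0.attn.proj.weight")
#     'vision_model.encoder.layers.0.self_attn.projection.weight'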
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"
    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [30_522, 1_037, 3_861, 1_997, 1_037, 2_450, 3_564, 2_006, 1_996, 3_509, 2_007, 2_014, 3_899, 102]

    out = hf_model.generate(image)
    assert out[0].tolist() == [30_522, 1_037, 2_450, 3_564, 2_006, 1_996, 3_509, 2_007, 2_014, 3_899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )
    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)
    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))
    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"

    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"
    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question, return_tensors="pt", padding="max_length", truncation=True, max_length=35,
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")
if __name__ == "__main__":
UpperCAmelCase__ : Tuple = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
UpperCAmelCase__ : str = parser.parse_args()
convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
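# A hedged usage sketch (the script filename is a placeholder):
#
#     python convert_blip_original_pytorch_to_hf.py \
#         --pytorch_dump_folder_path ./blip-converted
#
# This downloads the original BLIP checkpoints, remaps their keys with
# `rename_key`, verifies a few generations, and writes the captioning model
# plus `_vqa` and `_itm` variants under the chosen folder.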
| 721
|
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
    # See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
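# A brief sanity sketch: because of `attribute_map`, the generic names resolve
# to the DETR-specific fields (defaults shown are the ones set above):
#
#     config = DetrConfig()
#     assert config.hidden_size == config.d_model == 256
#     assert config.num_attention_heads == config.encoder_attention_heads == 8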
| 545
| 0
|
def binary_or(a: int, b: int):
    """Return the bitwise OR of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive')
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int('1' in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
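# A short worked example: 25 = 0b11001 and 32 = 0b100000, so their bitwise OR
# is 0b111001 (decimal 57), matching Python's built-in `|` operator:
#
#     >>> binary_or(25, 32)
#     '0b111001'
#     >>> int(binary_or(25, 32), 2) == 25 | 32
#     True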
| 70
|
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 512,
"""facebook/dpr-ctx_encoder-multiset-base""": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-question_encoder-single-nq-base""": 512,
"""facebook/dpr-question_encoder-multiset-base""": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-reader-single-nq-base""": 512,
"""facebook/dpr-reader-multiset-base""": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION


DPRSpanPrediction = collections.namedtuple(
    """DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)

DPRReaderOutput = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])

CUSTOM_DPR_READER_DOCSTRING = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ['input_ids', 'attention_mask']
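# A hedged usage sketch of the reader tokenizer defined above, following the
# standard DPR example (treat the concrete strings as illustrative):
#
#     from transformers import DPRReader, DPRReaderTokenizer
#
#     tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
#     model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
#     encoded = tokenizer(
#         questions=["What is love?"],
#         titles=["Haddaway"],
#         texts=["'What Is Love' is a song recorded by the artist Haddaway"],
#         return_tensors="pt",
#     )
#     outputs = model(**encoded)
#     best_spans = tokenizer.decode_best_spans(encoded, outputs)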
| 203
| 0
|
'''simple docstring'''
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("""Parameter n must be int or castable to int.""")
    if n <= 0:
        raise ValueError("""Parameter n must be greater than or equal to one.""")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
if __name__ == "__main__":
print(f'''{solution() = }''')
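# A quick sanity check using the classic Project Euler example: the prime
# factors of 13195 are 5, 7, 13 and 29, so the largest is 29:
#
#     >>> solution(13195)
#     29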
| 705
|
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("""glue""", """mrpc""")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""], examples["""sentence2"""], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["""idx""", """sentence1""", """sentence2"""], load_from_cache_file=False
    )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""", """labels""")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="""max_length""", max_length=128, return_tensors="""pt""")
        return tokenizer.pad(examples, padding="""longest""", return_tensors="""pt""")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    return train_dataloader, eval_dataloader
def training_function(config, args):
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""])
    seed = int(config["""seed"""])
    batch_size = int(config["""batch_size"""])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            """gradient_accumulation_steps"""
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("""glue""", """mrpc""")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["""labels"""])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions, references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'epoch {epoch}:', eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["""accuracy"""]
        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["""accuracy"""]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f'Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, """all_results.json"""), """w""") as f:
            json.dump(performance_metric, f)
def UpperCAmelCase ( ):
'''simple docstring'''
a__ = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
parser.add_argument(
"""--model_name_or_path""" , type=lowercase__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=lowercase__ , )
parser.add_argument(
"""--output_dir""" , type=lowercase__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--performance_lower_bound""" , type=lowercase__ , default=lowercase__ , help="""Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.""" , )
parser.add_argument(
"""--num_epochs""" , type=lowercase__ , default=3 , help="""Number of train epochs.""" , )
a__ = parser.parse_args()
a__ = {"""lr""": 2e-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(lowercase__ , lowercase__ )
if __name__ == "__main__":
main()
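# Illustrative launch command (a sketch; the config file and script names are placeholders,
# and any `accelerate` config with or without a DeepSpeed section should work):
#   accelerate launch --config_file my_accelerate_config.yaml test_performance.py --model_name_or_path bert-base-cased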
| 412
| 0
|
import numpy as np
from transformers import Pipeline
def softmax(outputs: np.ndarray) -> np.ndarray:
    """Numerically stable softmax over the last axis."""
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class PairClassificationPipeline(Pipeline):
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)
        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
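# Usage sketch: register the pipeline so it can be built via `pipeline("pair-classification", model=...)`.
# The task name and the choice of AutoModelForSequenceClassification are illustrative assumptions.
from transformers import AutoModelForSequenceClassification
from transformers.pipelines import PIPELINE_REGISTRY

PIPELINE_REGISTRY.register_pipeline(
    "pair-classification",
    pipeline_class=PairClassificationPipeline,
    pt_model=AutoModelForSequenceClassification,
)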
| 481
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_mobilebert": [
"MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"MobileBertConfig",
"MobileBertOnnxConfig",
],
"tokenization_mobilebert": ["MobileBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
"MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileBertForMaskedLM",
"MobileBertForMultipleChoice",
"MobileBertForNextSentencePrediction",
"MobileBertForPreTraining",
"MobileBertForQuestionAnswering",
"MobileBertForSequenceClassification",
"MobileBertForTokenClassification",
"MobileBertLayer",
"MobileBertModel",
"MobileBertPreTrainedModel",
"load_tf_weights_in_mobilebert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
"TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileBertForMaskedLM",
"TFMobileBertForMultipleChoice",
"TFMobileBertForNextSentencePrediction",
"TFMobileBertForPreTraining",
"TFMobileBertForQuestionAnswering",
"TFMobileBertForSequenceClassification",
"TFMobileBertForTokenClassification",
"TFMobileBertMainLayer",
"TFMobileBertModel",
"TFMobileBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
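# Note: at import time only the lightweight `_import_structure` dict is built; the module object
# is swapped for a _LazyModule, so heavy submodules (and torch/TF) are imported on first attribute
# access, while the TYPE_CHECKING branch above gives static type checkers the real symbols.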
| 481
| 1
|
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    UNet3DConditionModel,
    VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    test_attention_slicing = False

    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # 3 frames
        video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "video": video,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5e-3)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()


@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_two_step_model(self):
        pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()

        # 10 frames
        generator = torch.Generator(device="cpu").manual_seed(0)
        video = torch.randn((1, 10, 3, 1024, 576), generator=generator)
        video = video.to("cuda")

        prompt = "Spiderman is surfing"

        video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type="pt").frames

        expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656])
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array).sum() < 1e-2
| 424
|
__all__ = [
"Audio",
"Array2D",
"Array3D",
"Array4D",
"Array5D",
"ClassLabel",
"Features",
"Sequence",
"Value",
"Image",
"Translation",
"TranslationVariableLanguages",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 424
| 1
|
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechT5FeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
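# e.g. floats_list((2, 3)) returns a 2x3 nested list of floats drawn uniformly from [0, scale).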
@require_torch
class SpeechT5FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        do_normalize=True,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        fmin=80,
        fmax=7600,
        mel_floor=1e-10,
        return_attention_mask=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "do_normalize": self.do_normalize,
            "num_mel_bins": self.num_mel_bins,
            "hop_length": self.hop_length,
            "win_length": self.win_length,
            "win_function": self.win_function,
            "fmin": self.fmin,
            "fmax": self.fmax,
            "mel_floor": self.mel_floor,
            "return_attention_mask": self.return_attention_mask,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs

    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs


@require_torch
class SpeechT5FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechT5FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = SpeechT5FeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[0][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))
    def test_double_precision_pad(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def test_call_target(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_values = feature_extractor(audio_target=np_speech_inputs, padding=True, return_tensors="np").input_values
        self.assertTrue(input_values.ndim == 3)
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_batch_feature_target(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_batch_feature_target_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_padding_accepts_tensors_target_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)

    def test_attention_mask_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)

    def test_attention_mask_with_truncation_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
             3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
             2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
             4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
             7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
             4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 93680))
        self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))

    def test_integration_target(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
             -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
             -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
             -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 366, 80))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
| 613
|
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}


class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
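# Note: the defaults above correspond to EfficientNet-B7 (width_coefficient=2.0,
# depth_coefficient=3.1), consistent with the google/efficientnet-b7 checkpoint in the archive
# map; the smaller B0-B6 variants scale these coefficients down. With the default
# num_block_repeats [1, 2, 2, 3, 3, 4, 1], num_hidden_layers = 16 * 4 = 64.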
| 613
| 1
|
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)


class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}

        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()

        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)

        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> None:
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
| 701
|
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
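# Worked example (verified by hand): chinese_remainder_theorem(5, 1, 7, 3) == 31, since
# 31 % 5 == 1 and 31 % 7 == 3, and 31 is the unique such residue modulo 5 * 7 = 35.
# Likewise invert_modulo(2, 5) == 3, since 2 * 3 == 6 == 1 (mod 5).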
if __name__ == "__main__":
from doctest import testmod
testmod(name='chinese_remainder_theorem', verbose=True)
testmod(name='chinese_remainder_theorem2', verbose=True)
testmod(name='invert_modulo', verbose=True)
testmod(name='extended_euclid', verbose=True)
| 340
| 0
|
"""simple docstring"""
def manhattan_distance(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list[float]) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
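# Worked example: manhattan_distance([1, 1], [2, 2]) == 2.0, i.e. |1 - 2| + |1 - 2|.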
if __name__ == "__main__":
import doctest
doctest.testmod()
| 238
|
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, txt, txt_tok):
        embs = self.transformer(input_ids=txt, attention_mask=txt_tok)[0]
        # Mean-pool token embeddings, ignoring padding positions via the attention mask.
        embs2 = (embs * txt_tok.unsqueeze(2)).sum(dim=1) / txt_tok.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
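# Usage sketch (the checkpoint id is an illustrative assumption):
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("M-CLIP/XLM-Roberta-Large-Vit-B-32")
#   model = MultilingualCLIP.from_pretrained("M-CLIP/XLM-Roberta-Large-Vit-B-32")
#   batch = tokenizer(["a photo of a cat"], return_tensors="pt", padding=True)
#   text_embeddings, token_embeddings = model(batch.input_ids, batch.attention_mask)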
| 244
| 0
|
import math
import sys
import cv2
import numpy as np
def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    # Apply a Gaussian with the given variance elementwise.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)


def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]


def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    # Build a (kernel_size x kernel_size) matrix of distances from the center,
    # then turn it into a spatial Gaussian kernel.
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return vec_gaussian(arr, spatial_variance)


def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    img2 = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            img2[i, j] = val
    return img2


def parse_args(args: list) -> tuple:
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)  # force an odd kernel size
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow("input image", img)

    out = img / 255
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cv2.imshow("output image", out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
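# Example invocation (the image path is a placeholder):
#   python bilateral_filter.py ../image_data/lena.jpg 1.0 1.0 5
# argv order: filename, spatial variance, intensity variance, kernel size (forced to be odd).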
| 448
|
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"""--original_config_file""",
default=None,
type=str,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--scheduler_type""",
default="""pndm""",
type=str,
help="""Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']""",
)
parser.add_argument(
"""--pipeline_type""",
default=None,
type=str,
help=(
"""The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"""
""". If `None` pipeline will be automatically inferred."""
),
)
parser.add_argument(
"""--image_size""",
default=None,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--prediction_type""",
default=None,
type=str,
help=(
"""The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"""
""" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
parser.add_argument(
"""--stable_unclip""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.""",
)
parser.add_argument(
"""--stable_unclip_prior""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.""",
)
parser.add_argument(
"""--clip_stats_path""",
type=str,
help="""Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.""",
required=False,
)
parser.add_argument(
"""--controlnet""", action="""store_true""", default=None, help="""Set flag if this is a controlnet checkpoint."""
)
parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
parser.add_argument(
"""--vae_path""",
type=str,
default=None,
required=False,
help="""Set to a path, hub id to an already converted vae to not convert it again.""",
)
    args = parser.parse_args()

    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
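    # Example invocation (paths are placeholders):
    #   python convert_original_stable_diffusion_to_diffusers.py \
    #       --checkpoint_path ./v1-5.ckpt --dump_path ./stable-diffusion-v1-5-diffusers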
| 448
| 1
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
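# Usage sketch (column and label names are illustrative):
#   features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
#   task = ImageClassification(image_column="image", label_column="labels")
#   task = task.align_with_features(features)  # copies the dataset's ClassLabel into label_schema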
| 30
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class AlignProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding="max_length", max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 299
| 0
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
class UpperCamelCase ( __SCREAMING_SNAKE_CASE ):
A__ = """philschmid/bart-large-cnn-samsum"""
A__ = (
"""This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, """
"""and returns a summary of the text."""
)
A__ = """summarizer"""
A__ = AutoTokenizer
A__ = AutoModelForSeqaSeqLM
A__ = ["""text"""]
A__ = ["""text"""]
def __SCREAMING_SNAKE_CASE ( self , snake_case__ ):
"""simple docstring"""
return self.pre_processor(snake_case__ , return_tensors="pt" , truncation=snake_case__ )
def __SCREAMING_SNAKE_CASE ( self , snake_case__ ):
"""simple docstring"""
return self.model.generate(**snake_case__ )[0]
def __SCREAMING_SNAKE_CASE ( self , snake_case__ ):
"""simple docstring"""
return self.pre_processor.decode(snake_case__ , skip_special_tokens=snake_case__ , clean_up_tokenization_spaces=snake_case__ )
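
# Usage sketch (illustrative, not part of the original file): a PipelineTool instance
# is callable and chains encode -> forward -> decode, so assuming the checkpoint above
# can be downloaded, usage would look like:
#
#   tool = TextSummarizationTool()
#   print(tool("Very long meeting notes ..."))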
| 295
|
"""simple docstring"""
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
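
# Illustrative example (consistent with the tests elsewhere in this dump): a simple
# backend guard maps to its backend name, and a non-guard line returns None.
#
#   find_backend("    if not is_torch_available():")  # -> "torch"
#   find_backend("_import_structure = {")             # -> None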
def parse_init(init_file):
    """
    Read an init_file and parse (per backend) the `_import_structure` objects defined and the `TYPE_CHECKING` objects
    defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
'''models.esm.openfold_utils''',
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.")
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 295
| 1
|
import math
class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Compute the winning vector by Euclidean distance."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        return 0 if d0 > d1 else 1

    def update(self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float) -> list[list[int | float]]:
        """Update the winning vector."""
        for i in range(len(weights)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]

            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)

            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
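
# Note (illustrative, not part of the original file): the update rule
# w[j][i] += alpha * (sample[i] - w[j][i]) moves the winning weight vector a
# fraction alpha of the way toward the sample, so with alpha = 0.5 each update
# halves the remaining distance between the winner and the training sample.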
| 492
|
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 492
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_bigbird_pegasus""": [
"""BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BigBirdPegasusConfig""",
"""BigBirdPegasusOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
"""BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BigBirdPegasusForCausalLM""",
"""BigBirdPegasusForConditionalGeneration""",
"""BigBirdPegasusForQuestionAnswering""",
"""BigBirdPegasusForSequenceClassification""",
"""BigBirdPegasusModel""",
"""BigBirdPegasusPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
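
# Note (illustrative, not part of the original file): with _LazyModule, importing the
# package only registers the names listed in _import_structure; the heavy torch-backed
# submodule is materialized on first attribute access, e.g.
#
#   from transformers.models.bigbird_pegasus import BigBirdPegasusConfig  # cheap
#   # accessing BigBirdPegasusModel later triggers the real modeling import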
| 689
|
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend(" if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")

        # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend(" if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")

        # double_backend_with_underscore = find_backend(
        #     " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n"
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n"
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 689
| 1
|
"""simple docstring"""
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = '\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`List[ndarray]`):\n        List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    references (`List[ndarray]`):\n        List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    num_labels (`int`):\n        Number of classes (categories).\n    ignore_index (`int`):\n        Index that will be ignored during evaluation.\n    nan_to_num (`int`, *optional*):\n        If specified, NaN values will be replaced by the number defined by the user.\n    label_map (`dict`, *optional*):\n        If specified, dictionary mapping old label indices to new label indices.\n    reduce_labels (`bool`, *optional*, defaults to `False`):\n        Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n        and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n    `Dict[str, float | ndarray]` comprising various elements:\n    - *mean_iou* (`float`):\n        Mean Intersection-over-Union (IoU averaged over all categories).\n    - *mean_accuracy* (`float`):\n        Mean accuracy (averaged over all categories).\n    - *overall_accuracy* (`float`):\n        Overall accuracy on all images.\n    - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n        Per category accuracy.\n    - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n        Per category IoU.\n\nExamples:\n\n    >>> import numpy as np\n\n    >>> mean_iou = datasets.load_metric("mean_iou")\n\n    >>> # suppose one has 3 different segmentation maps predicted\n    >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n    >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n    >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n    >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n    >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n    >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n    >>> predicted = [predicted_1, predicted_2, predicted_3]\n    >>> ground_truth = [actual_1, actual_2, actual_3]\n\n    >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n    >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n    {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n'
_CITATION = '\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}'
def intersect_and_union(pred_label, label, num_labels, ignore_index, label_map=None, reduce_labels=False):
    """Calculate intersection and union between a predicted and a ground-truth segmentation map."""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id

    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)

    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255

    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]

    intersect = pred_label[pred_label == label]

    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]

    area_union = area_pred_label + area_label - area_intersect

    return area_intersect, area_union, area_pred_label, area_label
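
# Sanity-check sketch (illustrative, not part of the original metric file): with a
# perfect prediction, per-class intersection equals per-class union.
#
#   pred = np.array([[0, 1], [1, 0]])
#   gt = np.array([[0, 1], [1, 0]])
#   inter, union, _, _ = intersect_and_union(pred, gt, num_labels=2, ignore_index=255)
#   assert (inter == union).all()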
def total_intersect_and_union(results, gt_seg_maps, num_labels, ignore_index, label_map=None, reduce_labels=False):
    """Calculate total intersection and union over a list of segmentation maps."""
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(results, gt_seg_maps, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
    """Calculate Mean Intersection-over-Union (mIoU)."""
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )

    # compute metrics
    metrics = {}

    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label

    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc

    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}

    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MeanIoU(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                }
            ),
            reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ],
        )

    def _compute(self, predictions, references, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
        iou_result = mean_iou(
            results=predictions,
            gt_seg_maps=references,
            num_labels=num_labels,
            ignore_index=ignore_index,
            nan_to_num=nan_to_num,
            label_map=label_map,
            reduce_labels=reduce_labels,
        )
        return iou_result
| 650
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"],
    "tokenization_lxmert": ["LxmertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_lxmert_fast"] = ["LxmertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lxmert"] = [
'LxmertEncoder',
'LxmertForPreTraining',
'LxmertForQuestionAnswering',
'LxmertModel',
'LxmertPreTrainedModel',
'LxmertVisualFeatureEncoder',
'LxmertXLayer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_lxmert"] = [
'TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLxmertForPreTraining',
'TFLxmertMainLayer',
'TFLxmertModel',
'TFLxmertPreTrainedModel',
'TFLxmertVisualFeatureEncoder',
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 648
| 0
|
'''simple docstring'''
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property):
    """
    Descriptor that mimics @property but caches output in a member variable.
    """

    def __get__(self, obj, objtype=None):
        # See docs.python.org/3/howto/descriptor.html#properties
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
def strtobool(val):
    """
    Convert a string representation of truth to `1` (true) or `0` (false); raise ValueError otherwise.
    """
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}")
def is_tensor(x):
    """
    Tests if `x` is a torch tensor, TF tensor, Jax array or NumPy array.
    """
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True

    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True

    return isinstance(x, np.ndarray)
def _is_numpy(x):
    return isinstance(x, np.ndarray)


def is_numpy_array(x):
    return _is_numpy(x)


def _is_torch(x):
    import torch

    return isinstance(x, torch.Tensor)


def is_torch_tensor(x):
    return False if not is_torch_available() else _is_torch(x)


def _is_torch_device(x):
    import torch

    return isinstance(x, torch.device)


def is_torch_device(x):
    return False if not is_torch_available() else _is_torch_device(x)


def _is_torch_dtype(x):
    import torch

    if isinstance(x, str):
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)


def is_torch_dtype(x):
    return False if not is_torch_available() else _is_torch_dtype(x)


def _is_tensorflow(x):
    import tensorflow as tf

    return isinstance(x, tf.Tensor)


def is_tf_tensor(x):
    return False if not is_tf_available() else _is_tensorflow(x)


def _is_tf_symbolic_tensor(x):
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor


def is_tf_symbolic_tensor(x):
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)


def _is_jax(x):
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x, jnp.ndarray)


def is_jax_tensor(x):
    return False if not is_flax_available() else _is_jax(x)
def to_py_obj(obj):
    """
    Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a python list.
    """
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj
def to_numpy(obj):
    """
    Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array.
    """
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj
class ModelOutput(OrderedDict):
    """
    Base class for all model outputs as dataclass; has a `__getitem__` that allows indexing by integer or slice (like
    a tuple) or strings (like a dictionary) and ignores `None` attributes.
    """

    def __post_init__(self):
        class_fields = fields(self)

        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value)."
                            )
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        # Will raise a KeyException if needed
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self):
        return tuple(self[k] for k in self.keys())
class ExplicitEnum(str, Enum):
    """
    Enum with a more explicit error message for missing values.
    """

    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
        )


class PaddingStrategy(ExplicitEnum):
    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"


class TensorType(ExplicitEnum):
    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"


class ContextManagers:
    """
    Wrapper for `contextlib.ExitStack` which enters a collection of context managers.
    """

    def __init__(self, context_managers: List[ContextManager]):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)
def can_return_loss(model_class):
    """
    Check if a given model can return loss.
    """
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True

    return False


def find_labels(model_class):
    """
    Find the labels used by a given model.
    """
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]


def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."):
    """Flatten a nested dict into a single-level dict."""

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))
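
# Illustrative example (not part of the original module): nested keys are joined
# with the delimiter, so
#
#   flatten_dict({"a": 1, "b": {"c": 2, "d": {"e": 3}}})
#   # -> {"a": 1, "b.c": 2, "b.d.e": 3}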
@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def transpose(array, axes=None):
    """
    Framework-agnostic version of `numpy.transpose` that works on torch/TensorFlow/Jax tensors as well as NumPy arrays.
    """
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"Type not supported for transpose: {type(array)}.")


def reshape(array, newshape):
    """
    Framework-agnostic version of `numpy.reshape`.
    """
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"Type not supported for reshape: {type(array)}.")


def squeeze(array, axis=None):
    """
    Framework-agnostic version of `numpy.squeeze`.
    """
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array)}.")


def expand_dims(array, axis):
    """
    Framework-agnostic version of `numpy.expand_dims`.
    """
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")


def tensor_size(array):
    """
    Framework-agnostic version of `numpy.size`.
    """
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array)}.")
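
# Illustrative example (not part of the original module): the same helpers work on
# any supported tensor type; with NumPy inputs they defer to NumPy:
#
#   x = np.ones((2, 3, 1))
#   transpose(x).shape       # (1, 3, 2)
#   squeeze(x).shape         # (2, 3)
#   expand_dims(x, 0).shape  # (1, 2, 3, 1)
#   tensor_size(x)           # 6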
def add_model_info_to_auto_map(auto_map, repo_id):
    """
    Adds the information of the repo_id to a given auto map.
    """
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"

    return auto_map
def infer_framework(model_class):
    """
    Infers the framework of a given model without using isinstance(), because we cannot guarantee that the relevant
    classes are imported or available.
    """
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f"Could not infer framework from class {model_class}.")
| 703
|
'''simple docstring'''
def solution(limit: int = 1_000_000) -> int:
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(f"{solution() = }")
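
# Sanity-check sketch (illustrative, not part of the original file): for a small
# limit the sieve agrees with a brute-force totient computed via math.gcd:
#
#   from math import gcd
#   assert solution(10) == sum(
#       sum(1 for k in range(1, n) if gcd(n, k) == 1) for n in range(2, 11)
#   )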
| 521
| 0
|
"""simple docstring"""
import random
class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Encrypt text using pseudo-random numbers."""
        plain = [ord(i) for i in text]
        key = []
        cipher = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Decrypt text using pseudo-random numbers."""
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)


if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
    print(c, k)
    print(Onepad().decrypt(c, k))
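
# Round-trip note (illustrative): decrypt inverts encrypt exactly, because for each
# character code i and key k, ((i + k) * k - k**2) / k == i.
#
#   c2, k2 = Onepad.encrypt("abc")
#   assert Onepad.decrypt(c2, k2) == "abc"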
| 91
|
"""simple docstring"""
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('''>=''', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)
def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}

            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")
def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
    model.load_state_dict(state_dict)
def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")
def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly a PyTorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
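
# Usage sketch (illustrative assumption, not part of the original module): in an
# Accelerate training script these helpers are normally driven by
# accelerator.save_state()/load_state(), but they can also be called directly, e.g.
#
#   save_fsdp_model(accelerator.state.fsdp_plugin, accelerator, model, "ckpt_dir")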
| 91
| 1
|
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("T")
class GraphAdjacencyList(Generic[T]):
    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as it's first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as it's first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as it's first adjacent vertex also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as it's first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as it's first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)
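
# Usage sketch (illustrative, not part of the original file): add_edge returns self,
# so edges can be chained; for an undirected graph both endpoint lists are updated.
#
#   graph = GraphAdjacencyList[int](directed=False)
#   graph.add_edge(1, 2).add_edge(2, 3)
#   print(graph)  # {1: [2], 2: [1, 3], 3: [2]}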
| 180
|
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m", "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", )
    parser.add_argument(
        "-c", "--caption", type=str, default="robotic cat with wings", help="Text used to generate images.", )
    parser.add_argument(
        "-n", "--images_num", type=int, default=4, help="How much images to generate.", )
    parser.add_argument(
        "-s", "--seed", type=int, default=42, help="Seed for random process.", )
    parser.add_argument(
        "-ci", "--cuda_id", type=int, default=0, help="cuda_id.", )
    args = parser.parse_args()
    return args


def image_grid(imgs, rows, cols):
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")

    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    grid_w, grid_h = grid.size

    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid


def generate_images(pipeline, prompt="robotic cat with wings", guidance_scale=7.5, num_inference_steps=50, num_images_per_prompt=1, seed=42, ):
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps, generator=generator, num_images_per_prompt=num_images_per_prompt, ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images
SCREAMING_SNAKE_CASE__ : Any = parse_args()
# Load models and create wrapper for stable diffusion
SCREAMING_SNAKE_CASE__ : int = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="""tokenizer""")
SCREAMING_SNAKE_CASE__ : str = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="""text_encoder""")
SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="""vae""")
SCREAMING_SNAKE_CASE__ : int = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="""unet""")
SCREAMING_SNAKE_CASE__ : Union[str, Any] = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
SCREAMING_SNAKE_CASE__ : str = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, """best_model.pt""")):
SCREAMING_SNAKE_CASE__ : str = load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, """unet""", unet)
else:
SCREAMING_SNAKE_CASE__ : Dict = unet.to(torch.device("""cuda""", args.cuda_id))
SCREAMING_SNAKE_CASE__ : Any = pipeline.to(unet.device)
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, """{}.png""".format("""_""".join(args.caption.split()))))
SCREAMING_SNAKE_CASE__ : int = os.path.join(args.pretrained_model_name_or_path, """_""".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, """{}.png""".format(idx + 1)))
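# Hedged usage sketch for the grid helper defined above: the call inside
# `generate_images` suggests it is meant to be bound as `image_grid`, so this
# demo assumes that binding and only needs Pillow.
# from PIL import Image
# tiles = [Image.new("RGB", (64, 64), c) for c in ("red", "green", "blue", "white")]
# image_grid(tiles, rows=2, cols=2).save("demo_grid.png")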
| 180
| 1
|
"""simple docstring"""
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
UpperCAmelCase = re.compile(r"""\b(a|an|the)\b""", re.UNICODE)
UpperCAmelCase = None
def _snake_case ( ):
"""simple docstring"""
_lowerCamelCase : List[Any] = argparse.ArgumentParser("""Official evaluation script for SQuAD version 2.0.""" )
parser.add_argument("""data_file""" , metavar="""data.json""" , help="""Input data JSON file.""" )
parser.add_argument("""pred_file""" , metavar="""pred.json""" , help="""Model predictions.""" )
parser.add_argument(
"""--out-file""" , """-o""" , metavar="""eval.json""" , help="""Write accuracy metrics to file (default is stdout).""" )
parser.add_argument(
"""--na-prob-file""" , """-n""" , metavar="""na_prob.json""" , help="""Model estimates of probability of no answer.""" )
parser.add_argument(
"""--na-prob-thresh""" , """-t""" , type=__snake_case , default=1.0 , help="""Predict \"\" if no-answer probability exceeds this (default = 1.0).""" , )
parser.add_argument(
"""--out-image-dir""" , """-p""" , metavar="""out_images""" , default=__snake_case , help="""Save precision-recall curves to directory.""" )
parser.add_argument("""--verbose""" , """-v""" , action="""store_true""" )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def _snake_case ( __snake_case : Dict ):
"""simple docstring"""
_lowerCamelCase : Dict = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
_lowerCamelCase : Optional[Any] = bool(qa["""answers"""]["""text"""] )
return qid_to_has_ans
def _snake_case ( __snake_case : Union[str, Any] ):
"""simple docstring"""
def remove_articles(__snake_case : List[str] ):
return ARTICLES_REGEX.sub(""" """ , __snake_case )
def white_space_fix(__snake_case : Optional[int] ):
return " ".join(text.split() )
def remove_punc(__snake_case : Optional[Any] ):
_lowerCamelCase : str = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__snake_case : Tuple ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__snake_case ) ) ) )
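# Self-contained walk-through of the normalization pipeline above
# (lowercase -> strip punctuation -> drop articles -> collapse whitespace);
# `normalize_answer_demo` is an illustrative re-implementation, not the
# obfuscated function itself.
import re
import string

def normalize_answer_demo(text):
    text = text.lower()
    text = "".join(ch for ch in text if ch not in set(string.punctuation))
    text = re.sub(r"\b(a|an|the)\b", " ", text)
    return " ".join(text.split())

assert normalize_answer_demo("The  Eiffel Tower!") == "eiffel tower"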
def _snake_case ( __snake_case : Any ):
"""simple docstring"""
if not s:
return []
return normalize_answer(__snake_case ).split()
def _snake_case ( __snake_case : Dict , __snake_case : int ):
"""simple docstring"""
return int(normalize_answer(__snake_case ) == normalize_answer(__snake_case ) )
def _snake_case ( __snake_case : Tuple , __snake_case : Any ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = get_tokens(__snake_case )
_lowerCamelCase : int = get_tokens(__snake_case )
_lowerCamelCase : Optional[Any] = collections.Counter(__snake_case ) & collections.Counter(__snake_case )
_lowerCamelCase : int = sum(common.values() )
if len(__snake_case ) == 0 or len(__snake_case ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
_lowerCamelCase : List[str] = 1.0 * num_same / len(__snake_case )
_lowerCamelCase : Dict = 1.0 * num_same / len(__snake_case )
_lowerCamelCase : int = (2 * precision * recall) / (precision + recall)
return fa
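# Worked example of the token-level F1 computed above: with gold "the cat sat"
# and prediction "the cat slept", two tokens overlap, so precision = recall
# = 2/3 and F1 = 2/3.
import collections

gold_toks = "the cat sat".split()
pred_toks = "the cat slept".split()
common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
num_same = sum(common.values())  # 2
precision = num_same / len(pred_toks)
recall = num_same / len(gold_toks)
assert abs((2 * precision * recall) / (precision + recall) - 2 / 3) < 1e-9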
def _snake_case ( __snake_case : Optional[Any] , __snake_case : str ):
"""simple docstring"""
_lowerCamelCase : List[Any] = {}
_lowerCamelCase : Optional[int] = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
_lowerCamelCase : Tuple = qa["""id"""]
_lowerCamelCase : Optional[int] = [t for t in qa["""answers"""]["""text"""] if normalize_answer(__snake_case )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
_lowerCamelCase : List[str] = [""""""]
if qid not in preds:
print(F'Missing prediction for {qid}' )
continue
_lowerCamelCase : Any = preds[qid]
# Take max over all gold answers
_lowerCamelCase : Tuple = max(compute_exact(__snake_case , __snake_case ) for a in gold_answers )
_lowerCamelCase : List[str] = max(compute_fa(__snake_case , __snake_case ) for a in gold_answers )
return exact_scores, fa_scores
def _snake_case ( __snake_case : List[Any] , __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : str ):
"""simple docstring"""
_lowerCamelCase : str = {}
for qid, s in scores.items():
_lowerCamelCase : Optional[Any] = na_probs[qid] > na_prob_thresh
if pred_na:
_lowerCamelCase : Any = float(not qid_to_has_ans[qid] )
else:
_lowerCamelCase : List[str] = s
return new_scores
def _snake_case ( __snake_case : Any , __snake_case : int , __snake_case : Any=None ):
"""simple docstring"""
if not qid_list:
_lowerCamelCase : int = len(__snake_case )
return collections.OrderedDict(
[
("""exact""", 100.0 * sum(exact_scores.values() ) / total),
("""f1""", 100.0 * sum(fa_scores.values() ) / total),
("""total""", total),
] )
else:
_lowerCamelCase : List[str] = len(__snake_case )
return collections.OrderedDict(
[
("""exact""", 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
("""f1""", 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
("""total""", total),
] )
def _snake_case ( __snake_case : Optional[Any] , __snake_case : Optional[Any] , __snake_case : int ):
"""simple docstring"""
for k in new_eval:
_lowerCamelCase : Optional[int] = new_eval[k]
def _snake_case ( __snake_case : Tuple , __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : Tuple ):
"""simple docstring"""
plt.step(__snake_case , __snake_case , color="""b""" , alpha=0.2 , where="""post""" )
plt.fill_between(__snake_case , __snake_case , step="""post""" , alpha=0.2 , color="""b""" )
plt.xlabel("""Recall""" )
plt.ylabel("""Precision""" )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(__snake_case )
plt.savefig(__snake_case )
plt.clf()
def _snake_case ( __snake_case : int , __snake_case : Dict , __snake_case : int , __snake_case : Union[str, Any] , __snake_case : Union[str, Any]=None , __snake_case : List[Any]=None ):
"""simple docstring"""
_lowerCamelCase : Any = sorted(__snake_case , key=lambda __snake_case : na_probs[k] )
_lowerCamelCase : List[Any] = 0.0
_lowerCamelCase : Optional[Any] = 1.0
_lowerCamelCase : Tuple = 0.0
_lowerCamelCase : Tuple = [1.0]
_lowerCamelCase : List[Any] = [0.0]
_lowerCamelCase : Optional[int] = 0.0
for i, qid in enumerate(__snake_case ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
_lowerCamelCase : List[str] = true_pos / float(i + 1 )
_lowerCamelCase : Optional[int] = true_pos / float(__snake_case )
if i == len(__snake_case ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(__snake_case )
recalls.append(__snake_case )
if out_image:
plot_pr_curve(__snake_case , __snake_case , __snake_case , __snake_case )
return {"ap": 100.0 * avg_prec}
def _snake_case ( __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : Optional[int] , __snake_case : Optional[int] , __snake_case : Tuple , __snake_case : Optional[Any] ):
"""simple docstring"""
if out_image_dir and not os.path.exists(__snake_case ):
os.makedirs(__snake_case )
_lowerCamelCase : str = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
_lowerCamelCase : List[str] = make_precision_recall_eval(
__snake_case , __snake_case , __snake_case , __snake_case , out_image=os.path.join(__snake_case , """pr_exact.png""" ) , title="""Precision-Recall curve for Exact Match score""" , )
_lowerCamelCase : int = make_precision_recall_eval(
__snake_case , __snake_case , __snake_case , __snake_case , out_image=os.path.join(__snake_case , """pr_f1.png""" ) , title="""Precision-Recall curve for F1 score""" , )
_lowerCamelCase : Union[str, Any] = {k: float(__snake_case ) for k, v in qid_to_has_ans.items()}
_lowerCamelCase : Optional[Any] = make_precision_recall_eval(
__snake_case , __snake_case , __snake_case , __snake_case , out_image=os.path.join(__snake_case , """pr_oracle.png""" ) , title="""Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)""" , )
merge_eval(__snake_case , __snake_case , """pr_exact""" )
merge_eval(__snake_case , __snake_case , """pr_f1""" )
merge_eval(__snake_case , __snake_case , """pr_oracle""" )
def _snake_case ( __snake_case : Union[str, Any] , __snake_case : Optional[int] , __snake_case : str , __snake_case : Optional[int] ):
"""simple docstring"""
if not qid_list:
return
_lowerCamelCase : Tuple = [na_probs[k] for k in qid_list]
_lowerCamelCase : int = np.ones_like(__snake_case ) / float(len(__snake_case ) )
plt.hist(__snake_case , weights=__snake_case , bins=20 , range=(0.0, 1.0) )
plt.xlabel("""Model probability of no-answer""" )
plt.ylabel("""Proportion of dataset""" )
plt.title(F'Histogram of no-answer probability: {name}' )
plt.savefig(os.path.join(__snake_case , F'na_prob_hist_{name}.png' ) )
plt.clf()
def _snake_case ( __snake_case : Optional[Any] , __snake_case : List[Any] , __snake_case : List[str] , __snake_case : Any ):
"""simple docstring"""
_lowerCamelCase : int = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
_lowerCamelCase : Optional[Any] = num_no_ans
_lowerCamelCase : str = cur_score
_lowerCamelCase : Dict = 0.0
_lowerCamelCase : Optional[int] = sorted(__snake_case , key=lambda __snake_case : na_probs[k] )
for i, qid in enumerate(__snake_case ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
_lowerCamelCase : List[str] = scores[qid]
else:
if preds[qid]:
_lowerCamelCase : str = -1
else:
_lowerCamelCase : List[str] = 0
cur_score += diff
if cur_score > best_score:
_lowerCamelCase : Any = cur_score
_lowerCamelCase : Dict = na_probs[qid]
return 100.0 * best_score / len(__snake_case ), best_thresh
def _snake_case ( __snake_case : Union[str, Any] , __snake_case : str , __snake_case : Union[str, Any] , __snake_case : List[Any] , __snake_case : str , __snake_case : Dict ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : str = find_best_thresh(__snake_case , __snake_case , __snake_case , __snake_case )
_lowerCamelCase , _lowerCamelCase : List[Any] = find_best_thresh(__snake_case , __snake_case , __snake_case , __snake_case )
_lowerCamelCase : Dict = best_exact
_lowerCamelCase : Optional[int] = exact_thresh
_lowerCamelCase : Any = best_fa
_lowerCamelCase : Any = fa_thresh
def _snake_case ( ):
"""simple docstring"""
with open(OPTS.data_file ) as f:
_lowerCamelCase : Union[str, Any] = json.load(__snake_case )
_lowerCamelCase : Dict = dataset_json["""data"""]
with open(OPTS.pred_file ) as f:
_lowerCamelCase : Union[str, Any] = json.load(__snake_case )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
_lowerCamelCase : Any = json.load(__snake_case )
else:
_lowerCamelCase : Optional[Any] = {k: 0.0 for k in preds}
_lowerCamelCase : List[Any] = make_qid_to_has_ans(__snake_case ) # maps qid to True/False
_lowerCamelCase : Union[str, Any] = [k for k, v in qid_to_has_ans.items() if v]
_lowerCamelCase : Optional[int] = [k for k, v in qid_to_has_ans.items() if not v]
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = get_raw_scores(__snake_case , __snake_case )
_lowerCamelCase : Dict = apply_no_ans_threshold(__snake_case , __snake_case , __snake_case , OPTS.na_prob_thresh )
_lowerCamelCase : List[str] = apply_no_ans_threshold(__snake_case , __snake_case , __snake_case , OPTS.na_prob_thresh )
_lowerCamelCase : Optional[Any] = make_eval_dict(__snake_case , __snake_case )
if has_ans_qids:
_lowerCamelCase : Any = make_eval_dict(__snake_case , __snake_case , qid_list=__snake_case )
merge_eval(__snake_case , __snake_case , """HasAns""" )
if no_ans_qids:
_lowerCamelCase : List[Any] = make_eval_dict(__snake_case , __snake_case , qid_list=__snake_case )
merge_eval(__snake_case , __snake_case , """NoAns""" )
if OPTS.na_prob_file:
find_all_best_thresh(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , OPTS.out_image_dir )
histogram_na_prob(__snake_case , __snake_case , OPTS.out_image_dir , """hasAns""" )
histogram_na_prob(__snake_case , __snake_case , OPTS.out_image_dir , """noAns""" )
if OPTS.out_file:
with open(OPTS.out_file , """w""" ) as f:
json.dump(__snake_case , __snake_case )
else:
print(json.dumps(__snake_case , indent=2 ) )
if __name__ == "__main__":
UpperCAmelCase = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("""Agg""")
import matplotlib.pyplot as plt
main()
| 88
|
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
snake_case__ : List[str] = logging.get_logger(__name__)
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
A_ = """vision-encoder-decoder"""
A_ = True
def __init__( self , **_UpperCAmelCase ) -> Dict:
super().__init__(**_UpperCAmelCase )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
f"""A configuraton of type {self.model_type} cannot be instantiated because """
f"""not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}""" )
UpperCamelCase_ = kwargs.pop('encoder' )
UpperCamelCase_ = encoder_config.pop('model_type' )
UpperCamelCase_ = kwargs.pop('decoder' )
UpperCamelCase_ = decoder_config.pop('model_type' )
UpperCamelCase_ = AutoConfig.for_model(_UpperCAmelCase , **_UpperCAmelCase )
UpperCamelCase_ = AutoConfig.for_model(_UpperCAmelCase , **_UpperCAmelCase )
UpperCamelCase_ = True
@classmethod
def _UpperCAmelCase ( cls , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ) -> PretrainedConfig:
logger.info('Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config' )
UpperCamelCase_ = True
UpperCamelCase_ = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> int:
UpperCamelCase_ = copy.deepcopy(self.__dict__ )
UpperCamelCase_ = self.encoder.to_dict()
UpperCamelCase_ = self.decoder.to_dict()
UpperCamelCase_ = self.__class__.model_type
return output
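# The class above mirrors transformers' VisionEncoderDecoderConfig; a hedged
# sketch of the intended round trip using real sub-configs (requires the
# `transformers` package installed):
# from transformers import BertConfig, ViTConfig, VisionEncoderDecoderConfig
# composite = VisionEncoderDecoderConfig.from_encoder_decoder_configs(ViTConfig(), BertConfig())
# assert composite.decoder.is_decoder and composite.decoder.add_cross_attention
# composite.to_dict()  # nests "encoder" and "decoder" sub-dicts plus "model_type"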
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
A_ = version.parse("""1.11""" )
@property
def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def _UpperCAmelCase ( self ) -> float:
return 1e-4
@property
def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict({'last_hidden_state': {0: 'batch', 1: 'encoder_sequence'}} )
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
@property
def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
UpperCamelCase_ = OrderedDict()
UpperCamelCase_ = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
UpperCamelCase_ = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
UpperCamelCase_ = {0: 'batch', 1: 'encoder_sequence'}
return common_inputs
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = -1 , _UpperCAmelCase = -1 , _UpperCAmelCase = False , _UpperCAmelCase = None , ) -> Mapping[str, Any]:
import torch
UpperCamelCase_ = OrderedDict()
UpperCamelCase_ = super().generate_dummy_inputs(
_UpperCAmelCase , batch_size=_UpperCAmelCase , seq_length=_UpperCAmelCase , is_pair=_UpperCAmelCase , framework=_UpperCAmelCase )
UpperCamelCase_ , UpperCamelCase_ = dummy_input['input_ids'].shape
UpperCamelCase_ = (batch, encoder_sequence, self._config.encoder_hidden_size)
UpperCamelCase_ = dummy_input.pop('input_ids' )
UpperCamelCase_ = dummy_input.pop('attention_mask' )
UpperCamelCase_ = torch.zeros(_UpperCAmelCase )
return common_inputs
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
@property
def _UpperCAmelCase ( self ) -> None:
pass
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> OnnxConfig:
return VisionEncoderDecoderEncoderOnnxConfig(_UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = "default" ) -> OnnxConfig:
UpperCamelCase_ = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(_UpperCAmelCase , _UpperCAmelCase )
| 23
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_snake_case : List[str] = {"""configuration_van""": ["""VAN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VanConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : List[Any] = [
"""VAN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""VanForImageClassification""",
"""VanModel""",
"""VanPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
_snake_case : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
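# Minimal illustration of the lazy-import pattern registered above (a sketch,
# not the real `_LazyModule`): attribute access triggers the actual import, so
# importing the package itself stays cheap.
import importlib
import types

class _LazyModuleDemo(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                return getattr(importlib.import_module(submodule), attr)
        raise AttributeError(attr)

assert _LazyModuleDemo("demo", {"json": ["dumps"]}).dumps({"a": 1}) == '{"a": 1}'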
| 493
|
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class lowerCAmelCase ( __UpperCAmelCase ):
    def __init__( self , dataset , process , params ):
        self.dataset = dataset
        self.process = process
        self.params = params
    def __len__( self ):
        return len(self.dataset )
    def __getitem__( self , i ):
        item = self.dataset[i]
        processed = self.process(item , **self.params )
        return processed
class lowerCAmelCase ( __UpperCAmelCase ):
def __init__( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase=None ):
_SCREAMING_SNAKE_CASE = loader
_SCREAMING_SNAKE_CASE = infer
_SCREAMING_SNAKE_CASE = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = loader_batch_size
# Internal bookkeeping
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
def __len__( self ):
return len(self.loader )
def __iter__( self ):
_SCREAMING_SNAKE_CASE = iter(self.loader )
return self
def lowercase ( self ):
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
_SCREAMING_SNAKE_CASE = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
_SCREAMING_SNAKE_CASE = {}
for k, element in self._loader_batch_data.items():
if isinstance(UpperCamelCase , UpperCamelCase ):
# Convert ModelOutput to tuple first
_SCREAMING_SNAKE_CASE = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
_SCREAMING_SNAKE_CASE = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
_SCREAMING_SNAKE_CASE = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(UpperCamelCase , UpperCamelCase ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
_SCREAMING_SNAKE_CASE = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
_SCREAMING_SNAKE_CASE = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
_SCREAMING_SNAKE_CASE = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
# Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
_SCREAMING_SNAKE_CASE = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
# Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
_SCREAMING_SNAKE_CASE = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
_SCREAMING_SNAKE_CASE = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
_SCREAMING_SNAKE_CASE = self._loader_batch_data.__class__(UpperCamelCase )
self._loader_batch_index += 1
return result
def lowercase ( self ):
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
_SCREAMING_SNAKE_CASE = next(self.iterator )
_SCREAMING_SNAKE_CASE = self.infer(UpperCamelCase , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(UpperCamelCase , torch.Tensor ):
_SCREAMING_SNAKE_CASE = processed
else:
_SCREAMING_SNAKE_CASE = list(processed.keys() )[0]
_SCREAMING_SNAKE_CASE = processed[key]
if isinstance(UpperCamelCase , UpperCamelCase ):
_SCREAMING_SNAKE_CASE = len(UpperCamelCase )
else:
_SCREAMING_SNAKE_CASE = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_SCREAMING_SNAKE_CASE = observed_batch_size
# Setting internal index to unwrap the batch
_SCREAMING_SNAKE_CASE = processed
_SCREAMING_SNAKE_CASE = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class lowerCAmelCase ( __UpperCAmelCase ):
def __init__( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase=None ):
super().__init__(UpperCamelCase , UpperCamelCase , UpperCamelCase )
def __iter__( self ):
_SCREAMING_SNAKE_CASE = iter(self.loader )
_SCREAMING_SNAKE_CASE = None
return self
def lowercase ( self ):
if self.subiterator is None:
_SCREAMING_SNAKE_CASE = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
_SCREAMING_SNAKE_CASE = next(self.subiterator )
except StopIteration:
# When a preprocess iterator ends, we can start looking at the next item
# ChunkIterator will keep feeding until ALL elements of the iterator
# have created their subiterator and have been iterated through.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
_SCREAMING_SNAKE_CASE = self.infer(next(self.iterator ) , **self.params )
_SCREAMING_SNAKE_CASE = next(self.subiterator )
return processed
class lowerCAmelCase ( __UpperCAmelCase ):
def __iter__( self ):
_SCREAMING_SNAKE_CASE = iter(self.loader )
return self
def lowercase ( self ):
# Extremely similar to PipelineIterator in its unpacking mechanism
# BUT, we have an extra required item which is the presence of `is_last`
# That is because everything is flattened by `PipelineChunkIterator` we
# need to keep track of how to regroup here in the original `process`
# boundaries so that `process` and `postprocess` see the same data.
# This iterator accumulates items (possibly while unbatching) until it
# hits an `is_last` and then just passes it on to the caller.
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
_SCREAMING_SNAKE_CASE = self.loader_batch_item()
_SCREAMING_SNAKE_CASE = item.pop("is_last" )
accumulator.append(UpperCamelCase )
if is_last:
return accumulator
while not is_last:
_SCREAMING_SNAKE_CASE = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(UpperCamelCase , torch.Tensor ):
_SCREAMING_SNAKE_CASE = processed
else:
_SCREAMING_SNAKE_CASE = list(processed.keys() )[0]
_SCREAMING_SNAKE_CASE = processed[key]
if isinstance(UpperCamelCase , UpperCamelCase ):
_SCREAMING_SNAKE_CASE = len(UpperCamelCase )
else:
_SCREAMING_SNAKE_CASE = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_SCREAMING_SNAKE_CASE = observed_batch_size
_SCREAMING_SNAKE_CASE = processed
_SCREAMING_SNAKE_CASE = 0
while self._loader_batch_index < self.loader_batch_size:
_SCREAMING_SNAKE_CASE = self.loader_batch_item()
_SCREAMING_SNAKE_CASE = item.pop("is_last" )
accumulator.append(UpperCamelCase )
if is_last:
return accumulator
else:
_SCREAMING_SNAKE_CASE = processed
_SCREAMING_SNAKE_CASE = item.pop("is_last" )
accumulator.append(UpperCamelCase )
return accumulator
class lowerCAmelCase ( __UpperCAmelCase ):
    def __init__( self , dataset , key ):
        self.dataset = dataset
        self.key = key
    def __len__( self ):
        return len(self.dataset )
    def __getitem__( self , i ):
        return self.dataset[i][self.key]
class lowerCAmelCase ( __UpperCAmelCase ):
    def __init__( self , dataset , key_a , key_b ):
        self.dataset = dataset
        self.key_a = key_a
        self.key_b = key_b
    def __len__( self ):
        return len(self.dataset )
    def __getitem__( self , i ):
        return {"text": self.dataset[i][self.key_a], "text_pair": self.dataset[i][self.key_b]}
| 493
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase = {'''configuration_mbart''': ['''MBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MBartConfig''', '''MBartOnnxConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ['''MBartTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ['''MBartTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
'''MBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MBartForCausalLM''',
'''MBartForConditionalGeneration''',
'''MBartForQuestionAnswering''',
'''MBartForSequenceClassification''',
'''MBartModel''',
'''MBartPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
'''TFMBartForConditionalGeneration''',
'''TFMBartModel''',
'''TFMBartPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
'''FlaxMBartForConditionalGeneration''',
'''FlaxMBartForQuestionAnswering''',
'''FlaxMBartForSequenceClassification''',
'''FlaxMBartModel''',
'''FlaxMBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 84
|
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
UpperCAmelCase = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
UpperCAmelCase = {
'''facebook/blenderbot_small-90M''': 512,
}
class A_ ( __lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : Dict = VOCAB_FILES_NAMES
_UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : str = BlenderbotSmallTokenizer
def __init__( self , snake_case=None , snake_case=None , snake_case="<|endoftext|>" , snake_case="<|endoftext|>" , snake_case="<|endoftext|>" , snake_case=False , snake_case=True , **snake_case , ):
super().__init__(
ByteLevelBPETokenizer(
vocab=snake_case , merges=snake_case , add_prefix_space=snake_case , trim_offsets=snake_case , ) , bos_token=snake_case , eos_token=snake_case , unk_token=snake_case , **snake_case , )
lowercase = add_prefix_space
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
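# Illustrative check of `build_inputs_with_special_tokens` above with made-up
# special-token IDs: a single sequence becomes <bos> + ids + <eos>.
bos_id, eos_id = 1, 2
token_ids = [7, 8, 9]
assert [bos_id] + token_ids + [eos_id] == [1, 7, 8, 9, 2]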
| 84
| 1
|
def solution( power : int = 1000 ) -> int:
    """simple docstring"""
    n = 2**power
    r = 0
    while n:
        r , n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
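# Quick sanity check of the digit-sum routine above: 2**15 = 32768 and
# 3 + 2 + 7 + 6 + 8 = 26 (Project Euler 16 uses power = 1000).
assert solution(15) == 26
assert solution(15) == sum(int(d) for d in str(2**15))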
| 307
|
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , config_file , pytorch_dump_path ):
    """simple docstring"""
    config = LxmertConfig.from_json_file(config_file )
    print(F'Building PyTorch model from configuration: {config}' )
    model = LxmertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F'Save PyTorch model to {pytorch_dump_path}' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__lowerCamelCase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
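# Hedged example invocation of the converter above (the script filename and
# all paths are placeholders):
# python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path /path/to/model.ckpt \
#     --config_file /path/to/lxmert_config.json \
#     --pytorch_dump_path /path/to/pytorch_model.bin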
| 307
| 1
|
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
def lowerCAmelCase (__A):
"""simple docstring"""
_a = torch.load(__A , map_location='''cpu''')
if "model" in sd.keys():
_a = torch.load(__A , map_location='''cpu''')['''model''']
# pop unnecessary weights
_a = [
'''decoder.version''',
'''decoder.output_projection.weight''',
]
for key in keys_to_delete:
if key in sd:
sd.pop(__A)
_a = {
'''decoder.project_in_dim.weight''': '''decoder.project_in.weight''',
'''decoder.project_out_dim.weight''': '''decoder.project_out.weight''',
'''decoder.layer_norm.weight''': '''decoder.final_layer_norm.weight''',
'''decoder.layer_norm.bias''': '''decoder.final_layer_norm.bias''',
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
_a = sd.pop(__A)
_a = list(sd.keys())
for key in keys:
if ".qkv_proj." in key:
_a = sd[key]
# We split QKV in separate Q,K,V
_a = key.replace('''.qkv_proj.''' , '''.q_proj.''')
_a = key.replace('''.qkv_proj.''' , '''.k_proj.''')
_a = key.replace('''.qkv_proj.''' , '''.v_proj.''')
_a = value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` has its QKV weight separated into K, V, Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
_a , _a , _a = torch.split(__A , depth // 3 , dim=0)
_a = q
_a = k
_a = v
del sd[key]
return sd
@torch.no_grad()
def lowerCAmelCase (__A , __A , __A=None):
"""simple docstring"""
_a = load_checkpoint(__A)
if config is not None:
_a = OPTConfig.from_pretrained(__A)
else:
_a = OPTConfig()
_a = OPTModel(__A).half().eval()
model.load_state_dict(__A)
# Check results
Path(__A).mkdir(exist_ok=__A)
model.save_pretrained(__A)
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fairseq_path",
type=str,
help=(
"path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
" https://huggingface.co/models?other=opt_metasq"
),
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
lowercase_ = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
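# Standalone sketch of the fused-QKV split performed in the checkpoint-loading
# helper above: a (3*d, d) weight is cut into three (d, d) blocks along dim 0
# (requires torch; `d` here is a toy hidden size).
import torch

d = 4
fused_qkv = torch.arange(3 * d * d, dtype=torch.float32).reshape(3 * d, d)
q_w, k_w, v_w = torch.split(fused_qkv, fused_qkv.shape[0] // 3, dim=0)
assert q_w.shape == k_w.shape == v_w.shape == (d, d)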
| 11
|
"""simple docstring"""
from __future__ import annotations
import time
A : List[str] = list[tuple[int, int]]
A : Tuple = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
A : Union[str, Any] = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class lowerCAmelCase :
'''simple docstring'''
def __init__( self :Any , lowerCamelCase_ :int , lowerCamelCase_ :int , lowerCamelCase_ :int , lowerCamelCase_ :int , lowerCamelCase_ :Node | None ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ = pos_x
UpperCamelCase__ = pos_y
UpperCamelCase__ = (pos_y, pos_x)
UpperCamelCase__ = goal_x
UpperCamelCase__ = goal_y
UpperCamelCase__ = parent
class lowerCAmelCase :
'''simple docstring'''
def __init__( self :int , lowerCamelCase_ :tuple[int, int] , lowerCamelCase_ :tuple[int, int] ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ = Node(start[1] , start[0] , goal[1] , goal[0] , lowerCamelCase_ )
UpperCamelCase__ = Node(goal[1] , goal[0] , goal[1] , goal[0] , lowerCamelCase_ )
UpperCamelCase__ = [self.start]
UpperCamelCase__ = False
def lowerCamelCase__ ( self :Any ) -> Path | None:
"""simple docstring"""
while self.node_queue:
UpperCamelCase__ = self.node_queue.pop(0 )
if current_node.pos == self.target.pos:
UpperCamelCase__ = True
return self.retrace_path(lowerCamelCase_ )
UpperCamelCase__ = self.get_successors(lowerCamelCase_ )
for node in successors:
self.node_queue.append(lowerCamelCase_ )
if not self.reached:
return [self.start.pos]
return None
def lowerCamelCase__ ( self :str , lowerCamelCase_ :Node ) -> list[Node]:
"""simple docstring"""
UpperCamelCase__ = []
for action in delta:
UpperCamelCase__ = parent.pos_x + action[1]
UpperCamelCase__ = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowerCamelCase_ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(lowerCamelCase_ , lowerCamelCase_ , self.target.pos_y , self.target.pos_x , lowerCamelCase_ ) )
return successors
def lowerCamelCase__ ( self :Any , lowerCamelCase_ :Node | None ) -> Path:
"""simple docstring"""
UpperCamelCase__ = node
UpperCamelCase__ = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
UpperCamelCase__ = current_node.parent
path.reverse()
return path
class lowerCAmelCase :
'''simple docstring'''
def __init__( self :Optional[int] , lowerCamelCase_ :str , lowerCamelCase_ :Union[str, Any] ) -> int:
"""simple docstring"""
UpperCamelCase__ = BreadthFirstSearch(lowerCamelCase_ , lowerCamelCase_ )
UpperCamelCase__ = BreadthFirstSearch(lowerCamelCase_ , lowerCamelCase_ )
UpperCamelCase__ = False
def lowerCamelCase__ ( self :int ) -> Path | None:
"""simple docstring"""
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
UpperCamelCase__ = self.fwd_bfs.node_queue.pop(0 )
UpperCamelCase__ = self.bwd_bfs.node_queue.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
UpperCamelCase__ = True
return self.retrace_bidirectional_path(
lowerCamelCase_ , lowerCamelCase_ )
UpperCamelCase__ = current_bwd_node
UpperCamelCase__ = current_fwd_node
UpperCamelCase__ = {
self.fwd_bfs: self.fwd_bfs.get_successors(lowerCamelCase_ ),
self.bwd_bfs: self.bwd_bfs.get_successors(lowerCamelCase_ ),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(lowerCamelCase_ )
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def lowerCamelCase__ ( self :List[str] , lowerCamelCase_ :Node , lowerCamelCase_ :Node ) -> Path:
"""simple docstring"""
UpperCamelCase__ = self.fwd_bfs.retrace_path(lowerCamelCase_ )
UpperCamelCase__ = self.bwd_bfs.retrace_path(lowerCamelCase_ )
bwd_path.pop()
bwd_path.reverse()
UpperCamelCase__ = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
A : str = (0, 0)
A : Any = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
A : Any = time.time()
A : Optional[int] = BreadthFirstSearch(init, goal)
A : List[str] = bfs.search()
A : Dict = time.time() - start_bfs_time
print('Unidirectional BFS computation time : ', bfs_time)
A : Optional[int] = time.time()
A : Any = BidirectionalBreadthFirstSearch(init, goal)
A : List[Any] = bd_bfs.search()
A : Dict = time.time() - start_bd_bfs_time
print('Bidirectional BFS computation time : ', bd_bfs_time)
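# Self-contained illustration of the same breadth-first idea on a tiny
# obstacle-free 2x2 grid, independent of the (obfuscated) classes above.
from collections import deque

def bfs_demo(demo_grid, start, goal):
    frontier, parent = deque([start]), {start: None}
    while frontier:
        y, x = frontier.popleft()
        if (y, x) == goal:  # rebuild the path by walking parents backwards
            path, node = [], goal
            while node is not None:
                path.append(node)
                node = parent[node]
            return path[::-1]
        for dy, dx in ((-1, 0), (0, -1), (1, 0), (0, 1)):  # up, left, down, right
            ny, nx = y + dy, x + dx
            if 0 <= ny < len(demo_grid) and 0 <= nx < len(demo_grid[0]) \
                    and demo_grid[ny][nx] == 0 and (ny, nx) not in parent:
                parent[(ny, nx)] = (y, x)
                frontier.append((ny, nx))
    return None

assert bfs_demo([[0, 0], [0, 0]], (0, 0), (1, 1)) == [(0, 0), (1, 0), (1, 1)]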
| 516
| 0
|
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
UpperCamelCase : Dict = ''
UpperCamelCase : Any = ''
UpperCamelCase : Optional[Any] = ''
UpperCamelCase : Optional[Any] = 1 # (0 is vertical, 1 is horizontal)
def A__ ( ):
lowerCamelCase__ , lowerCamelCase__ = get_dataset(__lowerCAmelCase , __lowerCAmelCase )
print("""Processing...""" )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = update_image_and_anno(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
for index, image in enumerate(__lowerCAmelCase ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
lowerCamelCase__ = random_chars(32 )
lowerCamelCase__ = paths[index].split(os.sep )[-1].rsplit(""".""" , 1 )[0]
lowerCamelCase__ = F'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'''
cva.imwrite(F'''/{file_root}.jpg''' , __lowerCAmelCase , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F'''Success {index+1}/{len(__lowerCAmelCase )} with {file_name}''' )
lowerCamelCase__ = []
for anno in new_annos[index]:
lowerCamelCase__ = F'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'''
annos_list.append(__lowerCAmelCase )
with open(F'''/{file_root}.txt''' , """w""" ) as outfile:
outfile.write("""\n""".join(line for line in annos_list ) )
def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : str ):
lowerCamelCase__ = []
lowerCamelCase__ = []
for label_file in glob.glob(os.path.join(__lowerCAmelCase , """*.txt""" ) ):
lowerCamelCase__ = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
with open(__lowerCAmelCase ) as in_file:
lowerCamelCase__ = in_file.readlines()
lowerCamelCase__ = os.path.join(__lowerCAmelCase , F'''{label_name}.jpg''' )
lowerCamelCase__ = []
for obj_list in obj_lists:
lowerCamelCase__ = obj_list.rstrip("""\n""" ).split(""" """ )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(__lowerCAmelCase )
labels.append(__lowerCAmelCase )
return img_paths, labels
def A__ ( __lowerCAmelCase : list , __lowerCAmelCase : list , __lowerCAmelCase : int = 1 ):
lowerCamelCase__ = []
lowerCamelCase__ = []
lowerCamelCase__ = []
for idx in range(len(__lowerCAmelCase ) ):
lowerCamelCase__ = []
lowerCamelCase__ = img_list[idx]
path_list.append(__lowerCAmelCase )
lowerCamelCase__ = anno_list[idx]
lowerCamelCase__ = cva.imread(__lowerCAmelCase )
if flip_type == 1:
lowerCamelCase__ = cva.flip(__lowerCAmelCase , __lowerCAmelCase )
for bbox in img_annos:
lowerCamelCase__ = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
lowerCamelCase__ = cva.flip(__lowerCAmelCase , __lowerCAmelCase )
for bbox in img_annos:
lowerCamelCase__ = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(__lowerCAmelCase )
new_imgs_list.append(__lowerCAmelCase )
return new_imgs_list, new_annos_lists, path_list
def random_chars( number_char : int = 32 ):
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code ) for _ in range(number_char ) )
if __name__ == "__main__":
main()
print('DONE ✅')
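# Worked check of the annotation update for a horizontal flip above: with
# YOLO-normalized coordinates, flipping maps the x-center to 1 - x.
bbox = [0, 0.25, 0.5, 0.2, 0.3]  # class_id, x_center, y_center, width, height
flipped = [bbox[0], 1 - bbox[1], bbox[2], bbox[3], bbox[4]]
assert flipped == [0, 0.75, 0.5, 0.2, 0.3]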
| 9
|
'''simple docstring'''
def solution():
    return [
        a * b * (1000 - a - b)
        for a in range(1 , 999 )
        for b in range(a , 999 )
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
if __name__ == "__main__":
print(F'{solution() = }')
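# Sanity check of the special-Pythagorean-triple search above: the known
# answer is (a, b, c) = (200, 375, 425), whose product is 31875000.
a, b = 200, 375
c = 1000 - a - b
assert a * a + b * b == c * c and a * b * c == 31_875_000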
| 9
| 1
|
import math
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> float:
if initial_intensity < 0:
raise ValueError('The value of intensity cannot be negative' )
# handling of negative values of initial intensity
if angle < 0 or angle > 360:
raise ValueError('In Malus Law, the angle is in the range 0-360 degrees' )
# handling of values out of allowed range
return initial_intensity * (math.cos(math.radians(lowerCamelCase_ ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name="malus_law")
| 89
|
"""simple docstring"""
__snake_case : str = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and\n# uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
__snake_case : str = [{'type': 'code', 'content': INSTALL_CONTENT}]
__snake_case : Optional[Any] = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 571
| 0
|
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 710
|
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class __a( unittest.TestCase ):
"""simple docstring"""
def a__ ( self ) -> List[Any]:
UpperCAmelCase_ : Dict = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
UpperCAmelCase_ : int = dict(zip(_SCREAMING_SNAKE_CASE ,range(len(_SCREAMING_SNAKE_CASE ) ) ) )
UpperCAmelCase_ : List[Any] = {
'''unk_token''': '''<unk>''',
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
}
UpperCAmelCase_ : Dict = {
'''feature_size''': 1,
'''padding_value''': 0.0,
'''sampling_rate''': 16_000,
'''return_attention_mask''': False,
'''do_normalize''': True,
}
UpperCAmelCase_ : str = tempfile.mkdtemp()
UpperCAmelCase_ : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCAmelCase_ : List[str] = os.path.join(self.tmpdirname ,_SCREAMING_SNAKE_CASE )
with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_SCREAMING_SNAKE_CASE ) + '''\n''' )
with open(self.feature_extraction_file ,'''w''' ,encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_SCREAMING_SNAKE_CASE ) + '''\n''' )
# load decoder from hub
UpperCAmelCase_ : str = '''hf-internal-testing/ngram-beam-search-decoder'''
def a__ ( self ,**_SCREAMING_SNAKE_CASE ) -> Dict:
UpperCAmelCase_ : int = self.add_kwargs_tokens_map.copy()
kwargs.update(_SCREAMING_SNAKE_CASE )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname ,**_SCREAMING_SNAKE_CASE )
def a__ ( self ,**_SCREAMING_SNAKE_CASE ) -> Any:
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname ,**_SCREAMING_SNAKE_CASE )
def a__ ( self ,**_SCREAMING_SNAKE_CASE ) -> Dict:
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name ,**_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> int:
shutil.rmtree(self.tmpdirname )
def a__ ( self ) -> int:
UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer()
UpperCAmelCase_ : List[str] = self.get_feature_extractor()
UpperCAmelCase_ : Tuple = self.get_decoder()
UpperCAmelCase_ : Tuple = WavaVecaProcessorWithLM(tokenizer=_SCREAMING_SNAKE_CASE ,feature_extractor=_SCREAMING_SNAKE_CASE ,decoder=_SCREAMING_SNAKE_CASE )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : List[str] = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer ,_SCREAMING_SNAKE_CASE )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() ,feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor ,_SCREAMING_SNAKE_CASE )
# decoder
self.assertEqual(processor.decoder._alphabet.labels ,decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set ,decoder.model_container[decoder._model_key]._unigram_set ,)
self.assertIsInstance(processor.decoder ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Tuple:
UpperCAmelCase_ : Tuple = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
UpperCAmelCase_ : Optional[int] = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname ,alpha=5.0 ,beta=3.0 ,score_boundary=-7.0 ,unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha ,5.0 )
self.assertEqual(processor.language_model.beta ,3.0 )
self.assertEqual(processor.language_model.score_boundary ,-7.0 )
self.assertEqual(processor.language_model.unk_score_offset ,3 )
def a__ ( self ) -> Dict:
UpperCAmelCase_ : str = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(_SCREAMING_SNAKE_CASE ,'''include''' ):
WavaVecaProcessorWithLM(
tokenizer=_SCREAMING_SNAKE_CASE ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() )
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : Any = self.get_feature_extractor()
UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer()
UpperCAmelCase_ : List[str] = self.get_decoder()
UpperCAmelCase_ : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_SCREAMING_SNAKE_CASE ,feature_extractor=_SCREAMING_SNAKE_CASE ,decoder=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = floats_list((3, 1_000) )
UpperCAmelCase_ : Any = feature_extractor(_SCREAMING_SNAKE_CASE ,return_tensors='''np''' )
UpperCAmelCase_ : Any = processor(_SCREAMING_SNAKE_CASE ,return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 )
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : Union[str, Any] = self.get_feature_extractor()
UpperCAmelCase_ : List[Any] = self.get_tokenizer()
UpperCAmelCase_ : Optional[Any] = self.get_decoder()
UpperCAmelCase_ : str = WavaVecaProcessorWithLM(tokenizer=_SCREAMING_SNAKE_CASE ,feature_extractor=_SCREAMING_SNAKE_CASE ,decoder=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = '''This is a test string'''
UpperCAmelCase_ : Optional[int] = processor(text=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = tokenizer(_SCREAMING_SNAKE_CASE )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def a__ ( self ,_SCREAMING_SNAKE_CASE=(2, 10, 16) ,_SCREAMING_SNAKE_CASE=77 ) -> int:
np.random.seed(_SCREAMING_SNAKE_CASE )
return np.random.rand(*_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Dict:
UpperCAmelCase_ : int = self.get_feature_extractor()
UpperCAmelCase_ : Any = self.get_tokenizer()
UpperCAmelCase_ : Optional[Any] = self.get_decoder()
UpperCAmelCase_ : List[Any] = WavaVecaProcessorWithLM(tokenizer=_SCREAMING_SNAKE_CASE ,feature_extractor=_SCREAMING_SNAKE_CASE ,decoder=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = self._get_dummy_logits(shape=(10, 16) ,seed=13 )
UpperCAmelCase_ : Optional[Any] = processor.decode(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = decoder.decode_beams(_SCREAMING_SNAKE_CASE )[0]
self.assertEqual(decoded_decoder[0] ,decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' ,decoded_processor.text )
self.assertEqual(decoded_decoder[-2] ,decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] ,decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
UpperCAmelCase_ : Dict = self.get_feature_extractor()
UpperCAmelCase_ : Optional[Any] = self.get_tokenizer()
UpperCAmelCase_ : Tuple = self.get_decoder()
UpperCAmelCase_ : List[Any] = WavaVecaProcessorWithLM(tokenizer=_SCREAMING_SNAKE_CASE ,feature_extractor=_SCREAMING_SNAKE_CASE ,decoder=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
UpperCAmelCase_ : Dict = processor.batch_decode(_SCREAMING_SNAKE_CASE )
else:
with get_context(_SCREAMING_SNAKE_CASE ).Pool() as pool:
UpperCAmelCase_ : str = processor.batch_decode(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = list(_SCREAMING_SNAKE_CASE )
with get_context('''fork''' ).Pool() as p:
UpperCAmelCase_ : List[str] = decoder.decode_beams_batch(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : str = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(_SCREAMING_SNAKE_CASE ,decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] ,decoded_processor.text )
self.assertListEqual(_SCREAMING_SNAKE_CASE ,decoded_processor.logit_score )
self.assertListEqual(_SCREAMING_SNAKE_CASE ,decoded_processor.lm_score )
def a__ ( self ) -> Optional[Any]:
UpperCAmelCase_ : List[Any] = self.get_feature_extractor()
UpperCAmelCase_ : List[Any] = self.get_tokenizer()
UpperCAmelCase_ : Tuple = self.get_decoder()
UpperCAmelCase_ : Any = WavaVecaProcessorWithLM(tokenizer=_SCREAMING_SNAKE_CASE ,feature_extractor=_SCREAMING_SNAKE_CASE ,decoder=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = self._get_dummy_logits()
UpperCAmelCase_ : List[Any] = 15
UpperCAmelCase_ : Optional[Any] = -20.0
UpperCAmelCase_ : Tuple = -4.0
UpperCAmelCase_ : Union[str, Any] = processor.batch_decode(
_SCREAMING_SNAKE_CASE ,beam_width=_SCREAMING_SNAKE_CASE ,beam_prune_logp=_SCREAMING_SNAKE_CASE ,token_min_logp=_SCREAMING_SNAKE_CASE ,)
UpperCAmelCase_ : List[Any] = decoded_processor_out.text
UpperCAmelCase_ : int = list(_SCREAMING_SNAKE_CASE )
with get_context('''fork''' ).Pool() as pool:
UpperCAmelCase_ : List[str] = decoder.decode_beams_batch(
_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,beam_width=_SCREAMING_SNAKE_CASE ,beam_prune_logp=_SCREAMING_SNAKE_CASE ,token_min_logp=_SCREAMING_SNAKE_CASE ,)
UpperCAmelCase_ : str = [d[0][0] for d in decoded_decoder_out]
UpperCAmelCase_ : Union[str, Any] = [d[0][2] for d in decoded_decoder_out]
UpperCAmelCase_ : Dict = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] ,_SCREAMING_SNAKE_CASE )
self.assertTrue(np.array_equal(_SCREAMING_SNAKE_CASE ,decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.0_54, -18.4_47] ,_SCREAMING_SNAKE_CASE ,atol=1e-3 ) )
self.assertTrue(np.array_equal(_SCREAMING_SNAKE_CASE ,decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.5_54, -13.94_74] ,_SCREAMING_SNAKE_CASE ,atol=1e-3 ) )
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : List[Any] = self.get_feature_extractor()
UpperCAmelCase_ : List[Any] = self.get_tokenizer()
UpperCAmelCase_ : Optional[int] = self.get_decoder()
UpperCAmelCase_ : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_SCREAMING_SNAKE_CASE ,feature_extractor=_SCREAMING_SNAKE_CASE ,decoder=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = self._get_dummy_logits()
UpperCAmelCase_ : List[Any] = 2.0
UpperCAmelCase_ : Optional[int] = 5.0
UpperCAmelCase_ : List[Any] = -20.0
UpperCAmelCase_ : List[str] = True
UpperCAmelCase_ : str = processor.batch_decode(
_SCREAMING_SNAKE_CASE ,alpha=_SCREAMING_SNAKE_CASE ,beta=_SCREAMING_SNAKE_CASE ,unk_score_offset=_SCREAMING_SNAKE_CASE ,lm_score_boundary=_SCREAMING_SNAKE_CASE ,)
UpperCAmelCase_ : Tuple = decoded_processor_out.text
UpperCAmelCase_ : Optional[Any] = list(_SCREAMING_SNAKE_CASE )
decoder.reset_params(
alpha=_SCREAMING_SNAKE_CASE ,beta=_SCREAMING_SNAKE_CASE ,unk_score_offset=_SCREAMING_SNAKE_CASE ,lm_score_boundary=_SCREAMING_SNAKE_CASE ,)
with get_context('''fork''' ).Pool() as pool:
UpperCAmelCase_ : Optional[int] = decoder.decode_beams_batch(
_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,)
UpperCAmelCase_ : List[str] = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha ,2.0 )
self.assertEqual(lm_model.beta ,5.0 )
self.assertEqual(lm_model.unk_score_offset ,-20.0 )
self.assertEqual(lm_model.score_boundary ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : Dict = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase_ : Tuple = processor.decoder.model_container[processor.decoder._model_key]
UpperCAmelCase_ : Dict = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
UpperCAmelCase_ : Any = os.listdir(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder-relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> List[Any]:
UpperCAmelCase_ : int = snapshot_download('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase_ : List[str] = WavaVecaProcessorWithLM.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key]
UpperCAmelCase_ : List[str] = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
UpperCAmelCase_ : List[Any] = os.listdir(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = os.listdir(_SCREAMING_SNAKE_CASE )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that the decoder files from the hub and the local files in the cache are the same
self.assertListEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Optional[Any]:
UpperCAmelCase_ : str = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase_ : Optional[Any] = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase_ : Tuple = floats_list((3, 1_000) )
UpperCAmelCase_ : Optional[Any] = processor_wavaveca(_SCREAMING_SNAKE_CASE ,return_tensors='''np''' )
UpperCAmelCase_ : List[str] = processor_auto(_SCREAMING_SNAKE_CASE ,return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() ,input_auto[key].sum() ,delta=1e-2 )
UpperCAmelCase_ : Any = self._get_dummy_logits()
UpperCAmelCase_ : int = processor_wavaveca.batch_decode(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = processor_auto.batch_decode(_SCREAMING_SNAKE_CASE )
self.assertListEqual(decoded_wavaveca.text ,decoded_auto.text )
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : Union[str, Any] = self.get_feature_extractor()
UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer()
UpperCAmelCase_ : Any = self.get_decoder()
UpperCAmelCase_ : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_SCREAMING_SNAKE_CASE ,feature_extractor=_SCREAMING_SNAKE_CASE ,decoder=_SCREAMING_SNAKE_CASE )
self.assertListEqual(
processor.model_input_names ,feature_extractor.model_input_names ,msg='''`processor` and `feature_extractor` model input names do not match''' ,)
@staticmethod
def a__ ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Tuple:
UpperCAmelCase_ : int = [d[key] for d in offsets]
return retrieved_list
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : List[str] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase_ : Union[str, Any] = self._get_dummy_logits()[0]
UpperCAmelCase_ : Tuple = processor.decode(_SCREAMING_SNAKE_CASE ,output_word_offsets=_SCREAMING_SNAKE_CASE )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) ,4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] ,'''word''' ) ) ,outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''word''' ) ,['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''start_offset''' ) ,[0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''end_offset''' ) ,[1, 3, 5] )
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase_ : int = self._get_dummy_logits()
UpperCAmelCase_ : List[str] = processor.batch_decode(_SCREAMING_SNAKE_CASE ,output_word_offsets=_SCREAMING_SNAKE_CASE )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) ,4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(_SCREAMING_SNAKE_CASE ,'''word''' ) ) for o in outputs['''word_offsets''']] ,outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''word''' ) ,['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''start_offset''' ) ,[0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''end_offset''' ) ,[1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def a__ ( self ) -> Union[str, Any]:
import torch
UpperCAmelCase_ : List[str] = load_dataset('''common_voice''' ,'''en''' ,split='''train''' ,streaming=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = ds.cast_column('''audio''' ,datasets.Audio(sampling_rate=16_000 ) )
UpperCAmelCase_ : Tuple = iter(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = next(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
UpperCAmelCase_ : Dict = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
UpperCAmelCase_ : List[str] = processor(sample['''audio''']['''array'''] ,return_tensors='''pt''' ).input_values
with torch.no_grad():
UpperCAmelCase_ : List[str] = model(_SCREAMING_SNAKE_CASE ).logits.cpu().numpy()
UpperCAmelCase_ : str = processor.decode(logits[0] ,output_word_offsets=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
UpperCAmelCase_ : Any = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
UpperCAmelCase_ : Any = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(_SCREAMING_SNAKE_CASE ,'''word''' ) ) ,_SCREAMING_SNAKE_CASE )
self.assertEqual(''' '''.join(self.get_from_offsets(_SCREAMING_SNAKE_CASE ,'''word''' ) ) ,output.text )
# output times
UpperCAmelCase_ : List[Any] = torch.tensor(self.get_from_offsets(_SCREAMING_SNAKE_CASE ,'''start_time''' ) )
UpperCAmelCase_ : str = torch.tensor(self.get_from_offsets(_SCREAMING_SNAKE_CASE ,'''end_time''' ) )
# fmt: off
UpperCAmelCase_ : str = torch.tensor([1.41_99, 1.65_99, 2.25_99, 3.0, 3.24, 3.59_99, 3.79_99, 4.09_99, 4.26, 4.94, 5.28, 5.65_99, 5.78, 5.94, 6.32, 6.53_99, 6.65_99] )
UpperCAmelCase_ : Optional[int] = torch.tensor([1.53_99, 1.89_99, 2.9, 3.16, 3.53_99, 3.72, 4.01_99, 4.17_99, 4.76, 5.15_99, 5.55_99, 5.69_99, 5.86, 6.19_99, 6.38, 6.61_99, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,atol=0.01 ) )
self.assertTrue(torch.allclose(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,atol=0.01 ) )
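# A minimal sketch (hypothetical helper, not part of the test class above) of the
# offset-to-time conversion exercised in the slow test: CTC frame offsets become
# seconds once multiplied by inputs_to_logits_ratio / sampling_rate.
def _offsets_to_seconds(word_offsets, inputs_to_logits_ratio, sampling_rate):
    time_offset = inputs_to_logits_ratio / sampling_rate
    return [
        {
            "word": d["word"],
            "start_time": d["start_offset"] * time_offset,
            "end_time": d["end_offset"] * time_offset,
        }
        for d in word_offsets
    ]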
| 300
| 0
|
'''simple docstring'''
def decimal_to_fraction(decimal: int | float | str) -> tuple[int, int]:
    """Convert a decimal (or its string form) to a reduced (numerator, denominator) pair."""
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        # Euclid's algorithm: find the greatest common divisor to reduce the fraction
        dividend, divisor = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        # integer division keeps the result exact for large fractions
        return numerator // divisor, denominator // divisor
if __name__ == "__main__":
print(f"""{decimal_to_fraction(2) = }""")
print(f"""{decimal_to_fraction(89.0) = }""")
print(f"""{decimal_to_fraction('67') = }""")
print(f"""{decimal_to_fraction('45.0') = }""")
print(f"""{decimal_to_fraction(1.5) = }""")
print(f"""{decimal_to_fraction('6.25') = }""")
print(f"""{decimal_to_fraction('78td') = }""")
| 71
|
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __a ( unittest.TestCase ):
@property
def __lowercase ( self : int ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase__ : List[Any] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
def __lowercase ( self : int ):
'''simple docstring'''
UpperCamelCase__ : List[str] = self.dummy_uncond_unet
UpperCamelCase__ : List[str] = ScoreSdeVeScheduler()
UpperCamelCase__ : Union[str, Any] = ScoreSdeVePipeline(unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE )
sde_ve.to(SCREAMING_SNAKE_CASE )
sde_ve.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[int] = torch.manual_seed(0 )
UpperCamelCase__ : List[Any] = sde_ve(num_inference_steps=2 , output_type="numpy" , generator=SCREAMING_SNAKE_CASE ).images
UpperCamelCase__ : str = torch.manual_seed(0 )
UpperCamelCase__ : Any = sde_ve(num_inference_steps=2 , output_type="numpy" , generator=SCREAMING_SNAKE_CASE , return_dict=SCREAMING_SNAKE_CASE )[
0
]
UpperCamelCase__ : List[str] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCamelCase__ : int = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class __a ( unittest.TestCase ):
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCamelCase__ : List[str] = "google/ncsnpp-church-256"
UpperCamelCase__ : Tuple = UNetaDModel.from_pretrained(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Union[str, Any] = ScoreSdeVeScheduler.from_pretrained(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Dict = ScoreSdeVePipeline(unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE )
sde_ve.to(SCREAMING_SNAKE_CASE )
sde_ve.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : str = torch.manual_seed(0 )
UpperCamelCase__ : Tuple = sde_ve(num_inference_steps=10 , output_type="numpy" , generator=SCREAMING_SNAKE_CASE ).images
UpperCamelCase__ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
UpperCamelCase__ : Any = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 228
| 0
|
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
__lowerCamelCase : Dict = logging.getLogger(__name__)
class A__ ( __snake_case ):
_UpperCAmelCase :Any = 'sequence-classification'
def __init__( self , A_ ):
'''simple docstring'''
if type(A_ ) == dict:
UpperCamelCase : Any = Namespace(**A_ )
UpperCamelCase : Optional[int] = glue_output_modes[hparams.task]
UpperCamelCase : Any = glue_tasks_num_labels[hparams.task]
super().__init__(A_ , A_ , self.mode )
def __UpperCamelCase( self , **A_ ):
'''simple docstring'''
return self.model(**A_ )
def __UpperCamelCase( self , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : Any = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
UpperCamelCase : Optional[int] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
UpperCamelCase : List[Any] = self(**A_ )
UpperCamelCase : Dict = outputs[0]
UpperCamelCase : Optional[Any] = self.trainer.lr_schedulers[0]["scheduler"]
UpperCamelCase : int = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Tuple = self.hparams
UpperCamelCase : Tuple = processors[args.task]()
UpperCamelCase : List[str] = processor.get_labels()
for mode in ["train", "dev"]:
UpperCamelCase : str = self._feature_file(A_ )
if os.path.exists(A_ ) and not args.overwrite_cache:
logger.info("Loading features from cached file %s" , A_ )
else:
logger.info("Creating features from dataset file at %s" , args.data_dir )
UpperCamelCase : Optional[Any] = (
processor.get_dev_examples(args.data_dir )
if mode == "dev"
else processor.get_train_examples(args.data_dir )
)
UpperCamelCase : Any = convert_examples_to_features(
A_ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info("Saving features into cached file %s" , A_ )
torch.save(A_ , A_ )
def __UpperCamelCase( self , A_ , A_ , A_ = False ):
'''simple docstring'''
UpperCamelCase : Dict = "dev" if mode == "test" else mode
UpperCamelCase : str = self._feature_file(A_ )
logger.info("Loading features from cached file %s" , A_ )
UpperCamelCase : Dict = torch.load(A_ )
UpperCamelCase : List[Any] = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
UpperCamelCase : Tuple = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
UpperCamelCase : Optional[Any] = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
UpperCamelCase : List[str] = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
UpperCamelCase : List[str] = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(A_ , A_ , A_ , A_ ) , batch_size=A_ , shuffle=A_ , )
def __UpperCamelCase( self , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : List[Any] = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
UpperCamelCase : Union[str, Any] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
UpperCamelCase : Tuple = self(**A_ )
UpperCamelCase : Dict = outputs[:2]
UpperCamelCase : Dict = logits.detach().cpu().numpy()
UpperCamelCase : int = inputs["labels"].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
UpperCamelCase : Tuple = torch.stack([x["val_loss"] for x in outputs] ).mean().detach().cpu().item()
UpperCamelCase : Optional[int] = np.concatenate([x["pred"] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
UpperCamelCase : Any = np.argmax(A_ , axis=1 )
elif self.hparams.glue_output_mode == "regression":
UpperCamelCase : List[str] = np.squeeze(A_ )
UpperCamelCase : Union[str, Any] = np.concatenate([x["target"] for x in outputs] , axis=0 )
UpperCamelCase : List[str] = [[] for _ in range(out_label_ids.shape[0] )]
UpperCamelCase : Any = [[] for _ in range(out_label_ids.shape[0] )]
UpperCamelCase : List[str] = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task , A_ , A_ )}
UpperCamelCase : Optional[Any] = dict(results.items() )
UpperCamelCase : str = results
return ret, preds_list, out_label_list
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
UpperCamelCase : List[Any] = self._eval_end(A_ )
UpperCamelCase : Union[str, Any] = ret["log"]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
UpperCamelCase : List[Any] = self._eval_end(A_ )
UpperCamelCase : List[Any] = ret["log"]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def __UpperCamelCase( A_ , A_ ):
'''simple docstring'''
BaseTransformer.add_model_specific_args(A_ , A_ )
parser.add_argument(
"--max_seq_length" , default=128 , type=A_ , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--task" , default="" , type=A_ , required=A_ , help="The GLUE task to run" , )
parser.add_argument(
"--gpus" , default=0 , type=A_ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
return parser
def A_ ( ) -> Optional[int]:
UpperCamelCase : Dict = argparse.ArgumentParser()
add_generic_args(_lowerCAmelCase , os.getcwd() )
UpperCamelCase : Optional[int] = GLUETransformer.add_model_specific_args(_lowerCAmelCase , os.getcwd() )
UpperCamelCase : List[str] = parser.parse_args()
# If output_dir is not provided, a folder will be generated in the current working directory
if args.output_dir is None:
UpperCamelCase : List[Any] = os.path.join(
"./results" , F"""{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}""" , )
os.makedirs(args.output_dir )
UpperCamelCase : Optional[int] = GLUETransformer(_lowerCAmelCase )
UpperCamelCase : str = generic_train(_lowerCAmelCase , _lowerCAmelCase )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
UpperCamelCase : List[str] = sorted(glob.glob(os.path.join(args.output_dir , "checkpoint-epoch=*.ckpt" ) , recursive=_lowerCAmelCase ) )
UpperCamelCase : Tuple = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(_lowerCAmelCase )
if __name__ == "__main__":
main()
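# A hypothetical invocation sketch (script name, paths, and task name are placeholders;
# --task and --max_seq_length come from add_model_specific_args above, the rest from the
# generic args helper):
# python run_pl_glue.py --model_name_or_path bert-base-cased --task mrpc \
#     --max_seq_length 128 --output_dir ./results --do_predict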
| 719
|
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :Tuple = ['note_seq']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["note_seq"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["note_seq"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["note_seq"] )
| 38
| 0
|
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
a__ = """\
@inproceedings{popovic-2015-chrf,
title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",
month = sep,
year = \"2015\",
address = \"Lisbon, Portugal\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W15-3049\",
doi = \"10.18653/v1/W15-3049\",
pages = \"392--395\",
}
@inproceedings{popovic-2017-chrf,
title = \"chr{F}++: words helping character n-grams\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Second Conference on Machine Translation\",
month = sep,
year = \"2017\",
address = \"Copenhagen, Denmark\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W17-4770\",
doi = \"10.18653/v1/W17-4770\",
pages = \"612--618\",
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
a__ = """\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The lengths of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
"""
a__ = """
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equal to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determines the importance of recall w.r.t. precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
'score' (float): The chrF (chrF++) score,
'char_order' (int): The character n-gram order,
'word_order' (int): The word n-gram order. If equal to 2, the metric is referred to as chrF++,
'beta' (int): Determines the importance of recall w.r.t. precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def __magic_name__ ( self : int):
'''simple docstring'''
if version.parse(scb.__version__) < version.parse("""1.4.12"""):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""")
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/mjpost/sacreBLEU#chrf--chrf""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence"""),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""") , id="""references"""),
}) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#chrf--chrf"""] , reference_urls=[
"""https://github.com/m-popovic/chrF""",
] , )
def __magic_name__ ( self : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : int = CHRF.CHAR_ORDER , UpperCamelCase__ : int = CHRF.WORD_ORDER , UpperCamelCase__ : int = CHRF.BETA , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , ):
'''simple docstring'''
snake_case__ = len(references[0])
if any(len(UpperCamelCase__) != references_per_prediction for refs in references):
raise ValueError("""Sacrebleu requires the same number of references for each prediction""")
snake_case__ = [[refs[i] for refs in references] for i in range(UpperCamelCase__)]
snake_case__ = CHRF(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
snake_case__ = sb_chrf.corpus_score(UpperCamelCase__ , UpperCamelCase__)
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
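# A note on the transposition in _compute above: sacrebleu's corpus_score expects one
# list per reference *position*, while this metric takes one reference sub-list per
# prediction, so the nested list is flipped, e.g. (illustrative values only):
# references  = [["ref a1", "ref a2"], ["ref b1", "ref b2"]]   # one sub-list per prediction
# transformed = [["ref a1", "ref b1"], ["ref a2", "ref b2"]]   # one list per reference position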
| 654
|
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _lowerCAmelCase ( lowercase_ ):
"""simple docstring"""
_lowercase : UNetaDModel
_lowercase : ScoreSdeVeScheduler
def __init__( self : Union[str, Any] , UpperCamelCase__ : UNetaDModel , UpperCamelCase__ : ScoreSdeVeScheduler):
'''simple docstring'''
super().__init__()
self.register_modules(unet=UpperCamelCase__ , scheduler=UpperCamelCase__)
@torch.no_grad()
def __call__( self : Union[str, Any] , UpperCamelCase__ : int = 1 , UpperCamelCase__ : int = 2_0_0_0 , UpperCamelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase__ : Optional[str] = "pil" , UpperCamelCase__ : bool = True , **UpperCamelCase__ : List[str] , ):
'''simple docstring'''
snake_case__ = self.unet.config.sample_size
snake_case__ = (batch_size, 3, img_size, img_size)
snake_case__ = self.unet
snake_case__ = randn_tensor(UpperCamelCase__ , generator=UpperCamelCase__) * self.scheduler.init_noise_sigma
snake_case__ = sample.to(self.device)
self.scheduler.set_timesteps(UpperCamelCase__)
self.scheduler.set_sigmas(UpperCamelCase__)
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
snake_case__ = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device)
# correction step
for _ in range(self.scheduler.config.correct_steps):
snake_case__ = self.unet(UpperCamelCase__ , UpperCamelCase__).sample
snake_case__ = self.scheduler.step_correct(UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__).prev_sample
# prediction step
snake_case__ = model(UpperCamelCase__ , UpperCamelCase__).sample
snake_case__ = self.scheduler.step_pred(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__)
snake_case__ , snake_case__ = output.prev_sample, output.prev_sample_mean
snake_case__ = sample_mean.clamp(0 , 1)
snake_case__ = sample.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
snake_case__ = self.numpy_to_pil(UpperCamelCase__)
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=UpperCamelCase__)
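# A minimal usage sketch for the pipeline above, mirroring the slow test elsewhere in
# this corpus (assumes the "google/ncsnpp-church-256" checkpoint is available):
# unet = UNetaDModel.from_pretrained("google/ncsnpp-church-256")
# scheduler = ScoreSdeVeScheduler.from_pretrained("google/ncsnpp-church-256")
# image = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)(num_inference_steps=10).images[0]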
| 654
| 1
|
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
_UpperCamelCase = logging.getLogger(__name__)
_UpperCamelCase = 'pytorch_model.bin'
@dataclasses.dataclass
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
__snake_case : str = dataclasses.field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models."""} )
__snake_case : Optional[str] = dataclasses.field(
default=snake_case__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co."""} , )
@dataclasses.dataclass
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
__snake_case : str = dataclasses.field(metadata={"""help""": """A csv or a json file containing the training data."""} )
__snake_case : str = dataclasses.field(metadata={"""help""": """A csv or a json file containing the data to predict on."""} )
__snake_case : Optional[str] = dataclasses.field(
default=snake_case__ , metadata={"""help""": """A csv or a json file containing the validation data."""} )
__snake_case : Optional[str] = dataclasses.field(
default=snake_case__ , metadata={"""help""": """The name of the task to train on."""} , )
__snake_case : Optional[List[str]] = dataclasses.field(
default=snake_case__ , metadata={"""help""": """The list of labels for the task."""} )
@dataclasses.dataclass
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
__snake_case : str = dataclasses.field(
metadata={"""help""": """The output directory where the model predictions and checkpoints will be written."""} )
__snake_case : Optional[str] = dataclasses.field(
default="""accuracy""" , metadata={"""help""": """The evaluation metric used for the task."""} )
__snake_case : Optional[str] = dataclasses.field(
default="""no""" , metadata={
"""help""": """The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]"""
} , )
__snake_case : Optional[int] = dataclasses.field(
default=10 , metadata={"""help""": """Number of evaluation calls with no improvement after which training will be stopped."""} , )
__snake_case : Optional[float] = dataclasses.field(
default=0.0 , metadata={
"""help""": """How much the specified evaluation metric must improve to satisfy early stopping conditions."""
} , )
__snake_case : Optional[bool] = dataclasses.field(
default=snake_case__ , metadata={"""help""": """Whether to filter the pseudo-labeled data based on the confidence score."""} , )
__snake_case : Optional[bool] = dataclasses.field(
default=snake_case__ , metadata={"""help""": """Whether to filter the pseudo-labeled data based on the validation performance."""} , )
__snake_case : Optional[bool] = dataclasses.field(
default=snake_case__ , metadata={"""help""": """Whether to fine-tune on labeled data after pseudo training."""} , )
__snake_case : Optional[float] = dataclasses.field(
default=0.0 , metadata={"""help""": """Confidence threshold for pseudo-labeled data filtering."""} , )
__snake_case : Optional[int] = dataclasses.field(
default=1_00 , metadata={"""help""": """Maximum number of self-training iterations."""} , )
__snake_case : Optional[int] = dataclasses.field(
default=snake_case__ , metadata={"""help""": """Random seed for initialization."""} , )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
__lowerCamelCase : List[str] =datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
__lowerCamelCase : Optional[Any] =dataset.filter(lambda SCREAMING_SNAKE_CASE : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
__lowerCamelCase : Tuple =int(eval_result * len(SCREAMING_SNAKE_CASE ) )
print(SCREAMING_SNAKE_CASE )
__lowerCamelCase : int =dataset.sort('''probability''' , reverse=SCREAMING_SNAKE_CASE )
__lowerCamelCase : Any =dataset.select(range(SCREAMING_SNAKE_CASE ) )
__lowerCamelCase : int =dataset.remove_columns(['''label''', '''probability'''] )
__lowerCamelCase : int =dataset.rename_column('''prediction''' , '''label''' )
__lowerCamelCase : int =dataset.map(lambda SCREAMING_SNAKE_CASE : {"label": idalabel[example["label"]]} )
__lowerCamelCase : Dict =dataset.shuffle(seed=args.seed )
__lowerCamelCase : Tuple =os.path.join(SCREAMING_SNAKE_CASE , F'train_pseudo.{args.data_file_extension}' )
if args.data_file_extension == "csv":
dataset.to_csv(SCREAMING_SNAKE_CASE , index=SCREAMING_SNAKE_CASE )
else:
dataset.to_json(SCREAMING_SNAKE_CASE )
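# A worked example of the validation-performance filter above (illustrative numbers
# only): with eval_result = 0.8 and 1000 pseudo-labeled rows, the rows are sorted by
# 'probability' in descending order and the top int(0.8 * 1000) = 800 are kept before
# 'prediction' is renamed to 'label' and the file is written out.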
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
__lowerCamelCase : Optional[Any] =Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
__lowerCamelCase : Union[str, Any] =STModelArguments(model_name_or_path=SCREAMING_SNAKE_CASE )
__lowerCamelCase : int =STDataArguments(train_file=SCREAMING_SNAKE_CASE , infer_file=SCREAMING_SNAKE_CASE )
__lowerCamelCase : int =STTrainingArguments(output_dir=SCREAMING_SNAKE_CASE )
__lowerCamelCase : int =argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(SCREAMING_SNAKE_CASE ).items():
setattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for key, value in kwargs.items():
if hasattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
setattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Sanity checks
__lowerCamelCase : Optional[int] ={}
__lowerCamelCase : Any =None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
__lowerCamelCase : List[str] =args.train_file
__lowerCamelCase : int =args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
__lowerCamelCase : Tuple =args.eval_file
for key in data_files:
__lowerCamelCase : str =data_files[key].split('''.''' )[-1]
assert extension in ["csv", "json"], F'`{key}_file` should be a csv or a json file.'
if args.data_file_extension is None:
__lowerCamelCase : str =extension
else:
assert extension == args.data_file_extension, F'`{key}_file` should be a {args.data_file_extension} file`.'
assert (
args.eval_metric in datasets.list_metrics()
), F'{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info('''Creating the initial data directory for self-training...''' )
__lowerCamelCase : Dict =F'{args.output_dir}/self-train_iter-{{}}'.format
__lowerCamelCase : str =data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=SCREAMING_SNAKE_CASE )
os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
accelerator.wait_for_everyone()
__lowerCamelCase : Tuple =None
__lowerCamelCase : Union[str, Any] =None
__lowerCamelCase : Any =0
__lowerCamelCase : str =False
# Show the progress bar
__lowerCamelCase : Union[str, Any] =tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
__lowerCamelCase : Any =data_dir_format(SCREAMING_SNAKE_CASE )
assert os.path.exists(SCREAMING_SNAKE_CASE )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
__lowerCamelCase : Optional[int] =os.path.join(SCREAMING_SNAKE_CASE , '''stage-1''' )
__lowerCamelCase : Optional[Any] ={
'''accelerator''': accelerator,
'''model_name_or_path''': args.model_name_or_path,
'''cache_dir''': args.cache_dir,
'''do_train''': True,
'''train_file''': data_files['''train'''] if iteration == 0 else data_files['''train_pseudo'''],
'''do_eval''': True if args.eval_file is not None else False,
'''eval_file''': data_files['''eval'''],
'''do_predict''': True,
'''infer_file''': data_files['''infer'''],
'''task_name''': args.task_name,
'''label_list''': args.label_list,
'''output_dir''': current_output_dir,
'''eval_metric''': args.eval_metric,
'''evaluation_strategy''': args.evaluation_strategy,
'''early_stopping_patience''': args.early_stopping_patience,
'''early_stopping_threshold''': args.early_stopping_threshold,
'''seed''': args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
arguments_dict.update({key: value} )
__lowerCamelCase : List[Any] =os.path.join(SCREAMING_SNAKE_CASE , '''best-checkpoint''' , SCREAMING_SNAKE_CASE )
if os.path.exists(SCREAMING_SNAKE_CASE ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.''' , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 1 *****''' , SCREAMING_SNAKE_CASE )
finetune(**SCREAMING_SNAKE_CASE )
accelerator.wait_for_everyone()
assert os.path.exists(SCREAMING_SNAKE_CASE )
logger.info('''Self-training job completed: iteration: %d, stage: 1.''' , SCREAMING_SNAKE_CASE )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
__lowerCamelCase : Optional[Any] =os.path.join(SCREAMING_SNAKE_CASE , '''best-checkpoint''' )
__lowerCamelCase : str =os.path.join(SCREAMING_SNAKE_CASE , '''stage-2''' )
# Update arguments_dict
__lowerCamelCase : Union[str, Any] =model_path
__lowerCamelCase : List[str] =data_files['''train''']
__lowerCamelCase : Dict =current_output_dir
__lowerCamelCase : Dict =os.path.join(SCREAMING_SNAKE_CASE , '''best-checkpoint''' , SCREAMING_SNAKE_CASE )
if os.path.exists(SCREAMING_SNAKE_CASE ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.''' , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 2 *****''' , SCREAMING_SNAKE_CASE )
finetune(**SCREAMING_SNAKE_CASE )
accelerator.wait_for_everyone()
assert os.path.exists(SCREAMING_SNAKE_CASE )
logger.info('''Self-training job completed: iteration: %d, stage: 2.''' , SCREAMING_SNAKE_CASE )
__lowerCamelCase : str =iteration
__lowerCamelCase : Union[str, Any] =data_dir_format(iteration + 1 )
__lowerCamelCase : Dict =AutoConfig.from_pretrained(os.path.join(SCREAMING_SNAKE_CASE , '''best-checkpoint''' ) )
__lowerCamelCase : Optional[Any] =config.idalabel
__lowerCamelCase : Any =os.path.join(SCREAMING_SNAKE_CASE , '''eval_results_best-checkpoint.json''' )
__lowerCamelCase : int =os.path.join(SCREAMING_SNAKE_CASE , '''test_results_best-checkpoint.json''' )
assert os.path.exists(SCREAMING_SNAKE_CASE )
with open(SCREAMING_SNAKE_CASE , '''r''' ) as f:
__lowerCamelCase : Tuple =float(json.load(SCREAMING_SNAKE_CASE )[args.eval_metric] )
__lowerCamelCase : str =os.path.join(SCREAMING_SNAKE_CASE , '''infer_output_best-checkpoint.csv''' )
assert os.path.exists(SCREAMING_SNAKE_CASE )
# Loading the dataset from local csv or json files.
__lowerCamelCase : List[Any] =load_dataset(args.data_file_extension , data_files={'''data''': data_files['''infer''']} )['''data''']
__lowerCamelCase : Tuple =load_dataset('''csv''' , data_files={'''data''': infer_output_file} )['''data''']
if accelerator.is_main_process:
os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
shutil.copy(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , F'eval_results_iter-{iteration}.json' ) )
if os.path.exists(SCREAMING_SNAKE_CASE ):
shutil.copy(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , F'test_results_iter-{iteration}.json' ) )
create_pseudo_labeled_data(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
accelerator.wait_for_everyone()
__lowerCamelCase : Optional[Any] =os.path.join(SCREAMING_SNAKE_CASE , F'train_pseudo.{args.data_file_extension}' )
if args.evaluation_strategy != IntervalStrategy.NO.value:
__lowerCamelCase : List[Any] =eval_result
if best_iteration is None:
__lowerCamelCase : List[str] =new_iteration
__lowerCamelCase : Tuple =new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
__lowerCamelCase : Any =new_iteration
__lowerCamelCase : int =new_eval_result
__lowerCamelCase : Tuple =0
else:
if new_eval_result == best_eval_result:
__lowerCamelCase : Tuple =new_iteration
__lowerCamelCase : str =new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
__lowerCamelCase : str =True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info('''Best iteration: %d''' , SCREAMING_SNAKE_CASE )
logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , SCREAMING_SNAKE_CASE )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(SCREAMING_SNAKE_CASE , F'eval_results_iter-{iteration}.json' ) , os.path.join(SCREAMING_SNAKE_CASE , '''eval_results_best-iteration.json''' ) , )
else:
# Assume that the last iteration is the best
logger.info('''Best iteration: %d''' , args.max_selftrain_iterations - 1 )
logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , SCREAMING_SNAKE_CASE )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(SCREAMING_SNAKE_CASE , F'eval_results_iter-{args.max_selftrain_iterations - 1}.json' ) , os.path.join(SCREAMING_SNAKE_CASE , '''eval_results_best-iteration.json''' ) , )
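# A hypothetical call sketch for the self-training driver above (`selftrain` is a
# placeholder name; the argument order is inferred from the STModelArguments /
# STDataArguments / STTrainingArguments construction at the top of the function, and
# extra keyword arguments are forwarded into the fine-tuning arguments dict):
# selftrain("bert-base-uncased", "train.csv", "infer.csv", "./st_output",
#           eval_file="eval.csv", evaluation_strategy="steps")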
| 704
|
"""simple docstring"""
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self :Tuple ):
__lowerCamelCase : Any =Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
__lowerCamelCase : Any =Vector()
def __lowercase ( self :Dict ):
__lowerCamelCase : Tuple =Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(__lowercase ) , '''(0,0,0,0,0,1)''' )
def __lowercase ( self :Dict ):
__lowerCamelCase : int =Vector([1, 2, 3, 4] )
self.assertEqual(len(__lowercase ) , 4 )
def __lowercase ( self :Dict ):
__lowerCamelCase : Optional[Any] =Vector([1, 2] )
__lowerCamelCase : Dict =Vector([1, 2, 3, 4, 5] )
__lowerCamelCase : List[Any] =Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
__lowerCamelCase : int =Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 )
def __lowercase ( self :Optional[int] ):
__lowerCamelCase : Tuple =Vector([1, 2, 3] )
__lowerCamelCase : Any =Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def __lowercase ( self :str ):
__lowerCamelCase : Union[str, Any] =Vector([1, 2, 3] )
__lowerCamelCase : int =Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def __lowercase ( self :int ):
__lowerCamelCase : List[Any] =Vector([1, 2, 3] )
__lowerCamelCase : List[Any] =Vector([2, -1, 4] ) # for test of dot product
__lowerCamelCase : Any =Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , '''(3.0,6.0,9.0)''' )
self.assertEqual((a * b) , 0 )
def __lowercase ( self :List[Any] ):
self.assertEqual(str(zero_vector(10 ) ).count('''0''' ) , 10 )
def __lowercase ( self :Union[str, Any] ):
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , '''(0,1,0)''' )
def __lowercase ( self :List[Any] ):
__lowerCamelCase : Any =Vector([1, 2, 3] )
__lowerCamelCase : Optional[int] =Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , __lowercase , __lowercase ) ) , '''(3,4,7)''' )
def __lowercase ( self :Dict ):
__lowerCamelCase : List[Any] =Vector([1, 0, 0, 0, 0, 0] )
__lowerCamelCase : Optional[int] =x.copy()
self.assertEqual(str(__lowercase ) , str(__lowercase ) )
def __lowercase ( self :int ):
__lowerCamelCase : str =Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(__lowercase ) , '''(0,1,0)''' )
def __lowercase ( self :int ):
__lowerCamelCase : Any =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual('''|1,2,3|\n|2,4,5|\n|6,7,8|\n''' , str(__lowercase ) )
def __lowercase ( self :int ):
__lowerCamelCase : Tuple =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__lowerCamelCase : List[Any] =[[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(__lowercase , __lowercase ) )
def __lowercase ( self :Optional[int] ):
__lowerCamelCase : Optional[Any] =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__lowerCamelCase : Tuple =[[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(__lowercase , __lowercase ) )
def __lowercase ( self :Tuple ):
__lowerCamelCase : Tuple =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def __lowercase ( self :int ):
__lowerCamelCase : Union[str, Any] =Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
__lowerCamelCase : Tuple =Vector([1, 2, 3] )
self.assertEqual('''(14,32,50)''' , str(a * x ) )
self.assertEqual('''|2,4,6|\n|8,10,12|\n|14,16,18|\n''' , str(a * 2 ) )
def __lowercase ( self :Optional[Any] ):
__lowerCamelCase : Optional[int] =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual('''|1,2,5|\n|2,4,5|\n|6,7,8|\n''' , str(__lowercase ) )
def __lowercase ( self :str ):
__lowerCamelCase : str =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertAlmostEqual(7 , a.component(2 , 1 ) , delta=0.01 )
def __lowercase ( self :Optional[int] ):
__lowerCamelCase : List[str] =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__lowerCamelCase : List[str] =Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual('''|2,4,10|\n|4,8,10|\n|12,14,18|\n''' , str(a + b ) )
def __lowercase ( self :Union[str, Any] ):
__lowerCamelCase : int =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__lowerCamelCase : Optional[int] =Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual('''|0,0,-4|\n|0,0,0|\n|0,0,-2|\n''' , str(a - b ) )
def __lowercase ( self :Any ):
self.assertEqual(
'''|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n''' , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main()
| 363
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = """▁"""
_UpperCamelCase = {"""vocab_file""": """sentencepiece.bpe.model""", """monolingual_vocab_file""": """dict.txt"""}
_UpperCamelCase = {
"""vocab_file""": {
"""vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model""",
},
"""monolingual_vocab_file""": {
"""vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt""",
},
}
_UpperCamelCase = {"""vinai/bartpho-syllable""": 1_0_2_4}
class __a ( __magic_name__ ):
"""simple docstring"""
__UpperCamelCase : int = VOCAB_FILES_NAMES
__UpperCamelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : int = ['input_ids', 'attention_mask']
def __init__( self , snake_case , snake_case , snake_case="<s>" , snake_case="</s>" , snake_case="</s>" , snake_case="<s>" , snake_case="<unk>" , snake_case="<pad>" , snake_case="<mask>" , snake_case = None , **snake_case , ):
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else mask_token
lowerCAmelCase__ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=snake_case , eos_token=snake_case , unk_token=snake_case , sep_token=snake_case , cls_token=snake_case , pad_token=snake_case , mask_token=snake_case , sp_model_kwargs=self.sp_model_kwargs , **snake_case , )
lowerCAmelCase__ : Union[str, Any] = vocab_file
lowerCAmelCase__ : Optional[Any] = monolingual_vocab_file
lowerCAmelCase__ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(snake_case ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
lowerCAmelCase__ : Optional[int] = {}
lowerCAmelCase__ : int = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(snake_case ) not in self.fairseq_tokens_to_ids:
lowerCAmelCase__ : Optional[int] = cnt
cnt += 1
with open(snake_case , "r" , encoding="utf-8" ) as f:
for line in f.readlines():
lowerCAmelCase__ : Union[str, Any] = line.strip().split()[0]
lowerCAmelCase__ : List[str] = len(self.fairseq_tokens_to_ids )
if str(snake_case ) not in self.fairseq_tokens_to_ids:
lowerCAmelCase__ : int = len(self.fairseq_tokens_to_ids )
lowerCAmelCase__ : Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ):
"""simple docstring"""
lowerCAmelCase__ : List[Any] = self.__dict__.copy()
lowerCAmelCase__ : str = None
lowerCAmelCase__ : Union[str, Any] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , snake_case ):
"""simple docstring"""
lowerCAmelCase__ : Any = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowerCAmelCase__ : Any = {}
lowerCAmelCase__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def SCREAMING_SNAKE_CASE_ ( self , snake_case , snake_case = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase__ : Any = [self.cls_token_id]
lowerCAmelCase__ : Dict = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE_ ( self , snake_case , snake_case = None , snake_case = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case , token_ids_a=snake_case , already_has_special_tokens=snake_case )
if token_ids_a is None:
return [1] + ([0] * len(snake_case )) + [1]
return [1] + ([0] * len(snake_case )) + [1, 1] + ([0] * len(snake_case )) + [1]
def SCREAMING_SNAKE_CASE_ ( self , snake_case , snake_case = None ):
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = [self.sep_token_id]
lowerCAmelCase__ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
return len(self.fairseq_ids_to_tokens )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : List[str] = {self.convert_ids_to_tokens(snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE_ ( self , snake_case ):
"""simple docstring"""
return self.sp_model.encode(snake_case , out_type=snake_case )
def SCREAMING_SNAKE_CASE_ ( self , snake_case ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def SCREAMING_SNAKE_CASE_ ( self , snake_case ):
"""simple docstring"""
return self.fairseq_ids_to_tokens[index]
def SCREAMING_SNAKE_CASE_ ( self , snake_case ):
"""simple docstring"""
lowerCAmelCase__ : List[str] = "".join(snake_case ).replace(snake_case , " " ).strip()
return out_string
def SCREAMING_SNAKE_CASE_ ( self , snake_case , snake_case = None ):
"""simple docstring"""
if not os.path.isdir(snake_case ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase__ : List[str] = os.path.join(
snake_case , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase__ : Optional[Any] = os.path.join(
snake_case , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case , "wb" ) as fi:
lowerCAmelCase__ : str = self.sp_model.serialized_model_proto()
fi.write(snake_case )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
snake_case ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , snake_case )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(snake_case , "w" , encoding="utf-8" ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(F"""{str(snake_case )} \n""" )
return out_vocab_file, out_monolingual_vocab_file
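# Hedged illustration (separate from the class above): the special-token layout
# produced by build_inputs_with_special_tokens, using placeholder ids. cls_id=0
# and sep_id=2 are assumptions for the demo, not real checkpoint values.
def _demo_special_token_layout(ids_a, ids_b=None, cls_id=0, sep_id=2):
    if ids_b is None:
        return [cls_id] + ids_a + [sep_id]  # <s> A </s>
    return [cls_id] + ids_a + [sep_id, sep_id] + ids_b + [sep_id]  # <s> A </s></s> B </s>


assert _demo_special_token_layout([5, 6]) == [0, 5, 6, 2]
assert _demo_special_token_layout([5], [7]) == [0, 5, 2, 2, 7, 2]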
"""simple docstring"""
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_UpperCamelCase = 1_6
_UpperCamelCase = 3_2
def SCREAMING_SNAKE_CASE ( lowercase__ ) -> str:
return int(x / 2**2_0 )
class __a :
"""simple docstring"""
def __enter__( self ):
"""simple docstring"""
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
lowerCAmelCase__ : str = torch.cuda.memory_allocated()
return self
def __exit__( self , *snake_case ):
"""simple docstring"""
gc.collect()
torch.cuda.empty_cache()
lowerCAmelCase__ : List[str] = torch.cuda.memory_allocated()
lowerCAmelCase__ : Optional[int] = torch.cuda.max_memory_allocated()
lowerCAmelCase__ : List[str] = bamb(self.end - self.begin )
lowerCAmelCase__ : Union[str, Any] = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ = 1_6 , lowercase__ = "bert-base-cased" , lowercase__ = 3_2_0 , lowercase__ = 1_6_0 , ) -> str:
lowerCAmelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained(lowercase__ )
lowerCAmelCase__ : Optional[Any] = load_dataset(
"glue" , "mrpc" , split={"train": F"""train[:{n_train}]""", "validation": F"""validation[:{n_val}]"""} )
def tokenize_function(lowercase__ ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase__ : str = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=lowercase__ , max_length=lowercase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowerCAmelCase__ : str = datasets.map(
lowercase__ , batched=lowercase__ , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=lowercase__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCAmelCase__ : int = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(lowercase__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowercase__ , padding="max_length" , max_length=1_2_8 , return_tensors="pt" )
return tokenizer.pad(lowercase__ , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
lowerCAmelCase__ : Tuple = DataLoader(
tokenized_datasets["train"] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
lowerCAmelCase__ : Optional[Any] = DataLoader(
tokenized_datasets["validation"] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
return train_dataloader, eval_dataloader
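# Hedged illustration of the two padding strategies chosen in collate_fn above:
# "longest" pads each batch only to its own longest member, while "max_length"
# pads every batch to one fixed size (TPUs need static shapes). The token ids
# and pad id below are made up for the demo.
def _demo_padding_shapes():
    short, longer = [101, 7, 102], [101, 7, 8, 9, 102]
    pad_id = 0
    longest = [seq + [pad_id] * (len(longer) - len(seq)) for seq in (short, longer)]
    fixed = [seq + [pad_id] * (8 - len(seq)) for seq in (short, longer)]
    return longest, fixed  # row lengths 5 vs 8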
def SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ ) -> Dict:
# Initialize accelerator
lowerCAmelCase__ : Optional[Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase__ : Union[str, Any] = config["lr"]
lowerCAmelCase__ : int = int(config["num_epochs"] )
lowerCAmelCase__ : Tuple = int(config["seed"] )
lowerCAmelCase__ : str = int(config["batch_size"] )
lowerCAmelCase__ : Any = args.model_name_or_path
set_seed(lowercase__ )
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = get_dataloaders(lowercase__ , lowercase__ , lowercase__ , args.n_train , args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase__ : Any = AutoModelForSequenceClassification.from_pretrained(lowercase__ , return_dict=lowercase__ )
# Instantiate optimizer
lowerCAmelCase__ : List[str] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowerCAmelCase__ : Any = optimizer_cls(params=model.parameters() , lr=lowercase__ )
if accelerator.state.deepspeed_plugin is not None:
lowerCAmelCase__ : List[str] = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
lowerCAmelCase__ : Dict = 1
lowerCAmelCase__ : List[str] = (len(lowercase__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowerCAmelCase__ : Any = get_linear_schedule_with_warmup(
optimizer=lowercase__ , num_warmup_steps=0 , num_training_steps=lowercase__ , )
else:
lowerCAmelCase__ : Optional[int] = DummyScheduler(lowercase__ , total_num_steps=lowercase__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Dict = accelerator.prepare(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# We need to keep track of how many total steps we have iterated over
lowerCAmelCase__ : str = 0
# We also need to keep track of the stating epoch so files are named properly
lowerCAmelCase__ : Tuple = 0
# Now we train the model
lowerCAmelCase__ : List[Any] = {}
for epoch in range(lowercase__ , lowercase__ ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(lowercase__ ):
lowerCAmelCase__ : Optional[Any] = model(**lowercase__ )
lowerCAmelCase__ : Optional[int] = outputs.loss
lowerCAmelCase__ : Any = loss / gradient_accumulation_steps
accelerator.backward(lowercase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin ) ) )
accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used ) )
accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked ) )
accelerator.print(
"Total Peak Memory consumed during the train (max): {}".format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
lowerCAmelCase__ : Optional[int] = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[F"""epoch-{epoch}"""] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , "peak_memory_utilization.json" ) , "w" ) as f:
json.dump(lowercase__ , lowercase__ )
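# Hedged note on the training loop above: dividing the loss by
# gradient_accumulation_steps makes the gradients summed over the accumulation
# window equivalent to one large-batch step, so optimizer.step(),
# lr_scheduler.step() and zero_grad() only run once every
# gradient_accumulation_steps batches.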
def SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
lowerCAmelCase__ : int = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
parser.add_argument(
"--model_name_or_path" , type=lowercase__ , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=lowercase__ , )
parser.add_argument(
"--output_dir" , type=lowercase__ , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--peak_memory_upper_bound" , type=lowercase__ , default=lowercase__ , help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value." , )
parser.add_argument(
"--n_train" , type=lowercase__ , default=3_2_0 , help="Number of training examples to use." , )
parser.add_argument(
"--n_val" , type=lowercase__ , default=1_6_0 , help="Number of validation examples to use." , )
parser.add_argument(
"--num_epochs" , type=lowercase__ , default=1 , help="Number of train epochs." , )
lowerCAmelCase__ : Optional[int] = parser.parse_args()
lowerCAmelCase__ : Dict = {"lr": 2E-5, "num_epochs": args.num_epochs, "seed": 4_2, "batch_size": 1_6}
training_function(lowercase__ , lowercase__ )
if __name__ == "__main__":
main()
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def UpperCAmelCase ( A : float , A : float , A : bool = False ):
'''simple docstring'''
if radian_mode:
return [magnitude * cos(A ), magnitude * sin(A )]
return [magnitude * cos(radians(A ) ), magnitude * sin(radians(A ) )]
def UpperCAmelCase ( A : NDArray[floataa] , A : NDArray[floataa] , A : float = 10**-1 ):
'''simple docstring'''
_UpperCAmelCase = cross(A , A )
_UpperCAmelCase = sum(A )
return abs(A ) < eps
if __name__ == "__main__":
# Test to check if it works
lowercase = array(
[
polar_force(718.4, 1_80 - 30),
polar_force(879.54, 45),
polar_force(1_00, -90),
]
)
lowercase = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
lowercase = array(
[
polar_force(30 * 9.81, 15),
polar_force(2_15, 1_80 - 45),
polar_force(2_64, 90 - 30),
]
)
lowercase = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
lowercase = array([[0, -20_00], [0, -12_00], [0, 1_56_00], [0, -1_24_00]])
lowercase = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
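    # Hedged worked check (same machinery as above): two equal and opposite
    # unit forces applied at the same point have moments that cancel, so the
    # system is in static equilibrium; cross() of a 2-D location with a 2-D
    # force yields the scalar z-moment summed by in_static_equilibrium.
    demo_forces = array([[1.0, 0.0], [-1.0, 0.0]])
    demo_location = array([[0.0, 1.0], [0.0, 1.0]])
    assert in_static_equilibrium(demo_forces, demo_location)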
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_UpperCAmelCase = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Dict:
_UpperCAmelCase = TextaTextGenerationPipeline(model=snake_case , tokenizer=snake_case )
return generator, ["Something to write", "Something else"]
def lowerCamelCase_ ( self , snake_case , snake_case ) -> Dict:
_UpperCAmelCase = generator('Something there' )
self.assertEqual(snake_case , [{'generated_text': ANY(snake_case )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]['generated_text'].startswith('Something there' ) )
_UpperCAmelCase = generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=snake_case )
self.assertEqual(
snake_case , [
[{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}],
[{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}],
] , )
_UpperCAmelCase = generator(
['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=snake_case )
self.assertEqual(
snake_case , [
[{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}],
[{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}],
] , )
with self.assertRaises(snake_case ):
generator(4 )
@require_torch
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='pt' )
# do_sample=False necessary for reproducibility
_UpperCAmelCase = generator('Something there' , do_sample=snake_case )
self.assertEqual(snake_case , [{'generated_text': ''}] )
_UpperCAmelCase = 3
_UpperCAmelCase = generator(
'Something there' , num_return_sequences=snake_case , num_beams=snake_case , )
_UpperCAmelCase = [
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': ''},
]
self.assertEqual(snake_case , snake_case )
_UpperCAmelCase = generator('This is a test' , do_sample=snake_case , num_return_sequences=2 , return_tensors=snake_case )
self.assertEqual(
snake_case , [
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
] , )
_UpperCAmelCase = generator.model.config.eos_token_id
_UpperCAmelCase = '<pad>'
_UpperCAmelCase = generator(
['This is a test', 'This is a second test'] , do_sample=snake_case , num_return_sequences=2 , batch_size=2 , return_tensors=snake_case , )
self.assertEqual(
snake_case , [
[
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
],
[
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
],
] , )
@require_tf
def lowerCamelCase_ ( self ) -> Any:
_UpperCAmelCase = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='tf' )
# do_sample=False necessary for reproducibility
_UpperCAmelCase = generator('Something there' , do_sample=snake_case )
self.assertEqual(snake_case , [{'generated_text': ''}] )
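# Hedged usage sketch, outside the test class above: how the pipeline is called
# in ordinary code. The checkpoint is the tiny random-weight model used by the
# tests, so the generated text carries no meaning.
def _demo_text2text_pipeline():
    demo_generator = pipeline('text2text-generation', model='patrickvonplaten/t5-tiny-random')
    return demo_generator('Something there', do_sample=False)  # [{'generated_text': ...}]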
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
__UpperCAmelCase = logging.get_logger(__name__)
class __UpperCAmelCase ( _UpperCamelCase ):
def UpperCAmelCase ( self : int , a_ : List[Any] ) -> Optional[Any]:
'''simple docstring'''
if isinstance(a_ , a_ ):
a__ : Any = [label.strip() for label in labels.split("," ) if label.strip()]
return labels
def __call__( self : Union[str, Any] , a_ : Tuple , a_ : Optional[Any] , a_ : List[str] ) -> Optional[Any]:
'''simple docstring'''
if len(a_ ) == 0 or len(a_ ) == 0:
raise ValueError("You must include at least one label and at least one sequence." )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
"The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. "
"Make sure the passed template includes formatting syntax such as {{}} where the label should go."
).format(a_ ) )
if isinstance(a_ , a_ ):
a__ : str = [sequences]
a__ : Optional[int] = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(a_ )] for label in labels] )
return sequence_pairs, sequences
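# Hedged illustration of the pairing above: one premise and two candidate
# labels expand, via the hypothesis template, into two premise/hypothesis
# pairs that the NLI model scores independently. Values are made up.
#
#     handler("I love hiking", ["travel", "cooking"], "This example is {}.")
#     -> ([["I love hiking", "This example is travel."],
#          ["I love hiking", "This example is cooking."]],
#         ["I love hiking"])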
@add_end_docstrings(_UpperCamelCase )
class __UpperCAmelCase ( _UpperCamelCase ):
def __init__( self : str , a_ : Optional[Any]=ZeroShotClassificationArgumentHandler() , *a_ : Tuple , **a_ : str ) -> Optional[int]:
'''simple docstring'''
a__ : List[Any] = args_parser
super().__init__(*a_ , **a_ )
if self.entailment_id == -1:
logger.warning(
"Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
"-1. Define a descriptive label2id mapping in the model config to ensure correct outputs." )
@property
def UpperCAmelCase ( self : Tuple ) -> str:
'''simple docstring'''
for label, ind in self.model.config.labelaid.items():
if label.lower().startswith("entail" ):
return ind
return -1
def UpperCAmelCase ( self : Optional[int] , a_ : List[Any] , a_ : int=True , a_ : Tuple=True , a_ : Tuple=TruncationStrategy.ONLY_FIRST , **a_ : Dict ) -> List[Any]:
'''simple docstring'''
a__ : str = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer does not support padding, which is required for zero-shot classification; "
                "falling back to `pad_token=eos_token`" )
a__ : List[str] = self.tokenizer.eos_token
try:
a__ : List[str] = self.tokenizer(
a_ , add_special_tokens=a_ , return_tensors=a_ , padding=a_ , truncation=a_ , )
except Exception as e:
if "too short" in str(a_ ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
a__ : List[str] = self.tokenizer(
a_ , add_special_tokens=a_ , return_tensors=a_ , padding=a_ , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def UpperCAmelCase ( self : Tuple , **a_ : Tuple ) -> Optional[int]:
'''simple docstring'''
if kwargs.get("multi_class" , a_ ) is not None:
a__ : str = kwargs["multi_class"]
logger.warning(
"The `multi_class` argument has been deprecated and renamed to `multi_label`. "
"`multi_class` will be removed in a future version of Transformers." )
a__ : Tuple = {}
if "candidate_labels" in kwargs:
a__ : Any = self._args_parser._parse_labels(kwargs["candidate_labels"] )
if "hypothesis_template" in kwargs:
a__ : str = kwargs["hypothesis_template"]
a__ : Tuple = {}
if "multi_label" in kwargs:
a__ : Dict = kwargs["multi_label"]
return preprocess_params, {}, postprocess_params
def __call__( self : str , a_ : Union[str, List[str]] , *a_ : List[str] , **a_ : List[Any] , ) -> Tuple:
'''simple docstring'''
if len(a_ ) == 0:
pass
elif len(a_ ) == 1 and "candidate_labels" not in kwargs:
a__ : Any = args[0]
else:
raise ValueError(F"Unable to understand extra arguments {args}" )
return super().__call__(a_ , **a_ )
def UpperCAmelCase ( self : Optional[int] , a_ : Tuple , a_ : Any=None , a_ : Dict="This example is {}." ) -> Optional[int]:
'''simple docstring'''
a__ , a__ : Optional[Any] = self._args_parser(a_ , a_ , a_ )
for i, (candidate_label, sequence_pair) in enumerate(zip(a_ , a_ ) ):
a__ : Union[str, Any] = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(a_ ) - 1,
**model_input,
}
def UpperCAmelCase ( self : Optional[int] , a_ : Optional[Any] ) -> List[Any]:
'''simple docstring'''
a__ : Dict = inputs["candidate_label"]
a__ : Optional[int] = inputs["sequence"]
a__ : Optional[int] = {k: inputs[k] for k in self.tokenizer.model_input_names}
a__ : int = self.model(**a_ )
a__ : Optional[int] = {
"candidate_label": candidate_label,
"sequence": sequence,
"is_last": inputs["is_last"],
**outputs,
}
return model_outputs
def UpperCAmelCase ( self : Dict , a_ : Any , a_ : List[str]=False ) -> Union[str, Any]:
'''simple docstring'''
a__ : int = [outputs["candidate_label"] for outputs in model_outputs]
a__ : Optional[int] = [outputs["sequence"] for outputs in model_outputs]
a__ : Union[str, Any] = np.concatenate([output["logits"].numpy() for output in model_outputs] )
a__ : List[str] = logits.shape[0]
a__ : Optional[int] = len(a_ )
a__ : List[str] = N // n
a__ : int = logits.reshape((num_sequences, n, -1) )
if multi_label or len(a_ ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
a__ : str = self.entailment_id
a__ : str = -1 if entailment_id == 0 else 0
a__ : str = reshaped_outputs[..., [contradiction_id, entailment_id]]
a__ : List[Any] = np.exp(a_ ) / np.exp(a_ ).sum(-1 , keepdims=a_ )
a__ : str = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
a__ : str = reshaped_outputs[..., self.entailment_id]
a__ : Optional[int] = np.exp(a_ ) / np.exp(a_ ).sum(-1 , keepdims=a_ )
a__ : List[str] = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
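# Hedged sketch of the scoring math in postprocess above: in the multi-label
# branch each label gets an independent entailment-vs-contradiction softmax,
# while the single-label branch (shown here) softmaxes the entailment logits
# across all candidate labels. The logits are made up for the demo.
def _demo_zero_shot_scores(entailment_logits=None):
    logits = np.array([2.0, 0.5, -1.0]) if entailment_logits is None else entailment_logits
    exp = np.exp(logits - logits.max())  # shift for numerical stability
    return exp / exp.sum()  # one score per candidate label, summing to 1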
"""simple docstring"""
from sklearn.metrics import matthews_corrcoef
import datasets
__UpperCAmelCase = '''
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
'''
__UpperCAmelCase = '''
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results[\'matthews_correlation\'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results[\'matthews_correlation\'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results[\'matthews_correlation\'], 2))
-0.25
'''
__UpperCAmelCase = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCAmelCase ( datasets.Metric ):
def UpperCAmelCase ( self : Optional[int] ) -> str:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=[
"https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
] , )
def UpperCAmelCase ( self : Tuple , a_ : Optional[Any] , a_ : Optional[Any] , a_ : Any=None ) -> Union[str, Any]:
'''simple docstring'''
return {
"matthews_correlation": float(matthews_corrcoef(a_ , a_ , sample_weight=a_ ) ),
}
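# Hedged companion sketch: for binary labels the metric above reduces to the
# closed form MCC = (TP*TN - FP*FN) / sqrt((TP+FP)(TP+FN)(TN+FP)(TN+FN)),
# computed here directly from made-up confusion-matrix counts.
def _demo_binary_mcc(tp=6, tn=5, fp=2, fn=1):
    from math import sqrt
    denominator = sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    return (tp * tn - fp * fn) / denominator if denominator else 0.0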
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class UpperCamelCase__ :
'''simple docstring'''
__a : Tuple = None
def A__ ( self ) ->Any:
UpperCAmelCase__ :List[str] = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase__ :Tuple = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , A )
def A__ ( self ) ->Union[str, Any]:
UpperCAmelCase__ :List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase__ :Tuple = os.path.join(A , 'feat_extract.json' )
feat_extract_first.to_json_file(A )
UpperCAmelCase__ :Optional[Any] = self.feature_extraction_class.from_json_file(A )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def A__ ( self ) ->List[Any]:
UpperCAmelCase__ :Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase__ :Union[str, Any] = feat_extract_first.save_pretrained(A )[0]
check_json_file_has_correct_format(A )
UpperCAmelCase__ :List[str] = self.feature_extraction_class.from_pretrained(A )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def A__ ( self ) ->List[Any]:
UpperCAmelCase__ :str = self.feature_extraction_class()
self.assertIsNotNone(A )
from math import isqrt
def A ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCAmelCase__ :str = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
            for j in range(i**2 , SCREAMING_SNAKE_CASE , i ):  # step by the prime i, not by the upper bound
UpperCAmelCase__ :List[Any] = False
return [i for i in range(2 , SCREAMING_SNAKE_CASE ) if is_prime[i]]
def A ( SCREAMING_SNAKE_CASE = 10**8 ):
"""simple docstring"""
UpperCAmelCase__ :Any = calculate_prime_numbers(max_number // 2 )
UpperCAmelCase__ :Optional[Any] = 0
UpperCAmelCase__ :List[str] = 0
UpperCAmelCase__ :Dict = len(SCREAMING_SNAKE_CASE ) - 1
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
if __name__ == "__main__":
print(f"""{solution() = }""")
import contextlib
import os
import sqlitea
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def lowerCamelCase__ ( __lowerCamelCase : Tuple , __lowerCamelCase : Dict ):
assert isinstance(__lowerCamelCase , __lowerCamelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def lowerCamelCase__ ( __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict ):
__UpperCAmelCase : Union[str, Any] = tmp_path / """cache"""
__UpperCAmelCase : Optional[int] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCAmelCase : str = SqlDatasetReader(
"""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=__lowerCamelCase , keep_in_memory=__lowerCamelCase ).read()
_check_sql_dataset(__lowerCamelCase , __lowerCamelCase )
@require_sqlalchemy
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def lowerCamelCase__ ( __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : Any ):
__UpperCAmelCase : Union[str, Any] = tmp_path / """cache"""
__UpperCAmelCase : List[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
__UpperCAmelCase : Any = features.copy() if features else default_expected_features
__UpperCAmelCase : Union[str, Any] = (
Features({feature: Value(__lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase : List[str] = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , features=__lowerCamelCase , cache_dir=__lowerCamelCase ).read()
_check_sql_dataset(__lowerCamelCase , __lowerCamelCase )
def lowerCamelCase__ ( __lowerCamelCase : Optional[int] ):
with contextlib.closing(sqlitea.connect(__lowerCamelCase ) ) as con:
__UpperCAmelCase : Dict = con.cursor()
cur.execute("""SELECT * FROM dataset""" )
for row in cur:
yield row
@require_sqlalchemy
def lowerCamelCase__ ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : int ):
__UpperCAmelCase : Optional[int] = tmp_path / """cache"""
__UpperCAmelCase : str = os.path.join(__lowerCamelCase , """tmp.sql""" )
__UpperCAmelCase : List[str] = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=__lowerCamelCase ).read()
SqlDatasetWriter(__lowerCamelCase , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=1 ).write()
__UpperCAmelCase : Optional[int] = iter_sql_file(__lowerCamelCase )
__UpperCAmelCase : Dict = iter_sql_file(__lowerCamelCase )
for rowa, rowa in zip(__lowerCamelCase , __lowerCamelCase ):
assert rowa == rowa
@require_sqlalchemy
def lowerCamelCase__ ( __lowerCamelCase : Any , __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] ):
__UpperCAmelCase : int = tmp_path / """cache"""
__UpperCAmelCase : int = os.path.join(__lowerCamelCase , """tmp.sql""" )
__UpperCAmelCase : Any = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=__lowerCamelCase ).read()
SqlDatasetWriter(__lowerCamelCase , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=2 ).write()
__UpperCAmelCase : Union[str, Any] = iter_sql_file(__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = iter_sql_file(__lowerCamelCase )
for rowa, rowa in zip(__lowerCamelCase , __lowerCamelCase ):
assert rowa == rowa
@require_sqlalchemy
def lowerCamelCase__ ( __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] ):
__UpperCAmelCase : Union[str, Any] = tmp_path / """cache"""
__UpperCAmelCase : Optional[int] = os.path.join(__lowerCamelCase , """tmp.sql""" )
__UpperCAmelCase : Optional[int] = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=__lowerCamelCase ).read()
with pytest.raises(__lowerCamelCase ):
SqlDatasetWriter(__lowerCamelCase , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=0 ).write()
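# Hedged standard-library sketch of the round trip the reader/writer tests
# above exercise, with no datasets machinery involved: create the table,
# insert rows, and read them back in insertion order. (sqlitea is the sqlite3
# import name used at the top of this file.)
def _demo_sqlite_roundtrip(path):
    with contextlib.closing(sqlitea.connect(path)) as con:
        con.execute("CREATE TABLE dataset (col_1 TEXT, col_2 INTEGER, col_3 REAL)")
        con.executemany("INSERT INTO dataset VALUES (?, ?, ?)", [("a", 1, 1.0), ("b", 2, 2.0)])
        con.commit()
        return list(con.execute("SELECT * FROM dataset"))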
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
a : List[Any] = True
except ImportError:
a : str = False
try:
from torch.hub import _get_torch_home
a : List[Any] = _get_torch_home()
except ImportError:
a : int = os.path.expanduser(
os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
)
a : Optional[Any] = os.path.join(torch_cache_home, "transformers")
a : Optional[Any] = "https://cdn.huggingface.co"
a : List[str] = "https://s3.amazonaws.com/models.huggingface.co/bert"
a : Any = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
a : Optional[int] = os.path.join(PATH, "config.yaml")
a : Dict = os.path.join(PATH, "attributes.txt")
a : Tuple = os.path.join(PATH, "objects.txt")
a : Dict = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
a : Dict = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
a : Optional[int] = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
a : Any = "pytorch_model.bin"
a : int = "config.yaml"
def lowerCamelCase__ ( __lowerCamelCase : str=OBJECTS , __lowerCamelCase : Union[str, Any]=ATTRIBUTES ):
__UpperCAmelCase : Union[str, Any] = []
with open(__lowerCamelCase ) as f:
for object in f.readlines():
vg_classes.append(object.split(""",""" )[0].lower().strip() )
__UpperCAmelCase : Dict = []
with open(__lowerCamelCase ) as f:
for object in f.readlines():
vg_attrs.append(object.split(""",""" )[0].lower().strip() )
return vg_classes, vg_attrs
def lowerCamelCase__ ( __lowerCamelCase : Any ):
__UpperCAmelCase : List[str] = OrderedDict()
with open(__lowerCamelCase , """rb""" ) as f:
__UpperCAmelCase : int = pkl.load(__lowerCamelCase )["""model"""]
for k in copy.deepcopy(list(ckp.keys() ) ):
__UpperCAmelCase : List[Any] = ckp.pop(__lowerCamelCase )
if isinstance(__lowerCamelCase , np.ndarray ):
__UpperCAmelCase : Union[str, Any] = torch.tensor(__lowerCamelCase )
else:
            assert isinstance(__lowerCamelCase , torch.Tensor ), type(__lowerCamelCase )  # torch.Tensor is the type; torch.tensor is a factory function
__UpperCAmelCase : List[str] = v
return r
class a :
"""simple docstring"""
a : Dict = {}
def __init__( self : Dict , __lowercase : dict , __lowercase : str = "root" , __lowercase : Any=0 ) -> Dict:
__UpperCAmelCase : List[str] = name
__UpperCAmelCase : str = level
__UpperCAmelCase : int = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
__UpperCAmelCase : List[str] = copy.deepcopy(__lowercase )
__UpperCAmelCase : Dict = copy.deepcopy(__lowercase )
if isinstance(__lowercase , __lowercase ):
__UpperCAmelCase : Union[str, Any] = Config(__lowercase , name=__lowercase , level=level + 1 )
__UpperCAmelCase : Union[str, Any] = v
setattr(self , __lowercase , __lowercase )
__UpperCAmelCase : Any = d
def __repr__( self : Optional[Any] ) -> Optional[int]:
return str(list((self._pointer.keys()) ) )
def __setattr__( self : List[str] , __lowercase : List[str] , __lowercase : Tuple ) -> int:
__UpperCAmelCase : int = val
__UpperCAmelCase : List[str] = val
__UpperCAmelCase : Union[str, Any] = key.split(""".""" )
__UpperCAmelCase : List[Any] = len(__lowercase ) - 1
__UpperCAmelCase : List[Any] = self._pointer
if len(__lowercase ) > 1:
for i, l in enumerate(__lowercase ):
if hasattr(self , __lowercase ) and isinstance(getattr(self , __lowercase ) , __lowercase ):
setattr(getattr(self , __lowercase ) , """.""".join(levels[i:] ) , __lowercase )
if l == last_level:
__UpperCAmelCase : Union[str, Any] = val
else:
__UpperCAmelCase : Union[str, Any] = pointer[l]
def UpperCAmelCase ( self : Tuple ) -> Optional[int]:
return self._pointer
def UpperCAmelCase ( self : str , __lowercase : Optional[int] , __lowercase : Any ) -> Optional[int]:
with open(f"""{file_name}""" , """w""" ) as stream:
dump(__lowercase , __lowercase )
def UpperCAmelCase ( self : List[str] , __lowercase : Union[str, Any] , __lowercase : Union[str, Any] ) -> Any:
with open(f"""{file_name}""" , """w""" ) as stream:
json.dump(__lowercase , __lowercase )
@staticmethod
def UpperCAmelCase ( __lowercase : List[Any] ) -> Optional[Any]:
with open(__lowercase ) as stream:
__UpperCAmelCase : Any = load(__lowercase , Loader=__lowercase )
return data
def __str__( self : List[str] ) -> Tuple:
__UpperCAmelCase : Any = """ """
if self._name != "root":
__UpperCAmelCase : Optional[Any] = f"""{t * (self._level-1)}{self._name}:\n"""
else:
__UpperCAmelCase : List[Any] = """"""
__UpperCAmelCase : Optional[Any] = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(__lowercase , __lowercase ):
r += f"""{t * (self._level)}{v}\n"""
self._level += 1
else:
r += f"""{t * (self._level)}{k}: {v} ({type(__lowercase ).__name__})\n"""
__UpperCAmelCase : int = level
return r[:-1]
@classmethod
def UpperCAmelCase ( cls : List[str] , __lowercase : str , **__lowercase : Any ) -> Any:
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = cls.get_config_dict(__lowercase , **__lowercase )
return cls(__lowercase )
@classmethod
def UpperCAmelCase ( cls : Dict , __lowercase : str , **__lowercase : Union[str, Any] ) -> Optional[int]:
__UpperCAmelCase : int = kwargs.pop("""cache_dir""" , __lowercase )
__UpperCAmelCase : int = kwargs.pop("""force_download""" , __lowercase )
__UpperCAmelCase : str = kwargs.pop("""resume_download""" , __lowercase )
__UpperCAmelCase : Dict = kwargs.pop("""proxies""" , __lowercase )
__UpperCAmelCase : Union[str, Any] = kwargs.pop("""local_files_only""" , __lowercase )
if os.path.isdir(__lowercase ):
__UpperCAmelCase : List[Any] = os.path.join(__lowercase , __lowercase )
elif os.path.isfile(__lowercase ) or is_remote_url(__lowercase ):
__UpperCAmelCase : Tuple = pretrained_model_name_or_path
else:
__UpperCAmelCase : Optional[int] = hf_bucket_url(__lowercase , filename=__lowercase , use_cdn=__lowercase )
try:
# Load from URL or cache if already cached
__UpperCAmelCase : Optional[int] = cached_path(
__lowercase , cache_dir=__lowercase , force_download=__lowercase , proxies=__lowercase , resume_download=__lowercase , local_files_only=__lowercase , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
__UpperCAmelCase : Optional[int] = Config.load_yaml(__lowercase )
except EnvironmentError:
__UpperCAmelCase : str = """Can't load config for"""
raise EnvironmentError(__lowercase )
if resolved_config_file == config_file:
print("""loading configuration file from path""" )
else:
print("""loading configuration file cache""" )
return Config.load_yaml(__lowercase ), kwargs
def lowerCamelCase__ ( __lowerCamelCase : Dict ):
__UpperCAmelCase : Optional[int] = torch.load("""dump.pt""" , map_location=in_tensor.device )
__UpperCAmelCase : Tuple = in_tensor.numpy()
__UpperCAmelCase : Optional[int] = out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5] )
print(na.shape , na[0, 0, :5] )
assert np.allclose(__lowerCamelCase , __lowerCamelCase , rtol=0.0_1 , atol=0.1 ), (
f"""{sum([1 for x in np.isclose(__lowerCamelCase , __lowerCamelCase , rtol=0.0_1 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %"""
" element-wise mismatch"
)
raise Exception("""tensors are all good""" )
# Hugging face functions below
def lowerCamelCase__ ( __lowerCamelCase : Optional[int] ):
__UpperCAmelCase : Tuple = urlparse(__lowerCamelCase )
return parsed.scheme in ("http", "https")
def lowerCamelCase__ ( __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : int=True ):
__UpperCAmelCase : int = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
__UpperCAmelCase : Optional[int] = """/""" not in model_id
if legacy_format:
return f"""{endpoint}/{model_id}-{filename}"""
else:
return f"""{endpoint}/{model_id}/{filename}"""
def lowerCamelCase__ ( __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple=None , __lowerCamelCase : Optional[Any]=0 , __lowerCamelCase : Optional[int]=None , ):
__UpperCAmelCase : Optional[int] = """python/{}""".format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
ua += "; " + "; ".join("""{}/{}""".format(__lowerCamelCase , __lowerCamelCase ) for k, v in user_agent.items() )
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
ua += "; " + user_agent
__UpperCAmelCase : List[str] = {"""user-agent""": ua}
if resume_size > 0:
__UpperCAmelCase : Union[str, Any] = """bytes=%d-""" % (resume_size,)
__UpperCAmelCase : Union[str, Any] = requests.get(__lowerCamelCase , stream=__lowerCamelCase , proxies=__lowerCamelCase , headers=__lowerCamelCase )
if response.status_code == 416: # Range not satisfiable
return
__UpperCAmelCase : List[str] = response.headers.get("""Content-Length""" )
__UpperCAmelCase : str = resume_size + int(__lowerCamelCase ) if content_length is not None else None
__UpperCAmelCase : List[Any] = tqdm(
unit="""B""" , unit_scale=__lowerCamelCase , total=__lowerCamelCase , initial=__lowerCamelCase , desc="""Downloading""" , )
for chunk in response.iter_content(chunk_size=1024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(__lowerCamelCase ) )
temp_file.write(__lowerCamelCase )
progress.close()
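# Hedged sketch of the resume logic inside http_get above: when resume_size
# bytes already exist on disk, a "Range: bytes=<resume_size>-" header requests
# only the remainder, and an HTTP 416 reply means nothing is left to fetch.
def _demo_range_header(resume_size):
    return {"Range": "bytes=%d-" % (resume_size,)} if resume_size > 0 else {}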
def lowerCamelCase__ ( __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : str=10 , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : Dict=None , __lowerCamelCase : List[str]=False , ):
if cache_dir is None:
__UpperCAmelCase : Optional[Any] = TRANSFORMERS_CACHE
if isinstance(__lowerCamelCase , __lowerCamelCase ):
__UpperCAmelCase : List[str] = str(__lowerCamelCase )
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
__UpperCAmelCase : List[Any] = None
if not local_files_only:
try:
__UpperCAmelCase : Optional[Any] = requests.head(__lowerCamelCase , allow_redirects=__lowerCamelCase , proxies=__lowerCamelCase , timeout=__lowerCamelCase )
if response.status_code == 200:
__UpperCAmelCase : Dict = response.headers.get("""ETag""" )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
__UpperCAmelCase : List[str] = url_to_filename(__lowerCamelCase , __lowerCamelCase )
# get cache path to put the file
__UpperCAmelCase : Optional[int] = os.path.join(__lowerCamelCase , __lowerCamelCase )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(__lowerCamelCase ):
return cache_path
else:
__UpperCAmelCase : List[Any] = [
file
for file in fnmatch.filter(os.listdir(__lowerCamelCase ) , filename + """.*""" )
if not file.endswith(""".json""" ) and not file.endswith(""".lock""" )
]
if len(__lowerCamelCase ) > 0:
return os.path.join(__lowerCamelCase , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
"""Cannot find the requested files in the cached path and outgoing traffic has been"""
""" disabled. To enable model look-ups and downloads online, set 'local_files_only'"""
""" to False.""" )
return None
# From now on, etag is not None.
if os.path.exists(__lowerCamelCase ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
__UpperCAmelCase : str = cache_path + """.lock"""
with FileLock(__lowerCamelCase ):
# If the download just completed while the lock was activated.
if os.path.exists(__lowerCamelCase ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
__UpperCAmelCase : int = cache_path + """.incomplete"""
@contextmanager
def _resumable_file_manager():
with open(__lowerCamelCase , """a+b""" ) as f:
yield f
__UpperCAmelCase : str = _resumable_file_manager
if os.path.exists(__lowerCamelCase ):
__UpperCAmelCase : List[Any] = os.stat(__lowerCamelCase ).st_size
else:
__UpperCAmelCase : List[Any] = 0
else:
__UpperCAmelCase : str = partial(tempfile.NamedTemporaryFile , dir=__lowerCamelCase , delete=__lowerCamelCase )
__UpperCAmelCase : Optional[int] = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
"""%s not found in cache or force_download set to True, downloading to %s""" , __lowerCamelCase , temp_file.name , )
http_get(
__lowerCamelCase , __lowerCamelCase , proxies=__lowerCamelCase , resume_size=__lowerCamelCase , user_agent=__lowerCamelCase , )
os.replace(temp_file.name , __lowerCamelCase )
__UpperCAmelCase : Any = {"""url""": url, """etag""": etag}
__UpperCAmelCase : Union[str, Any] = cache_path + """.json"""
with open(__lowerCamelCase , """w""" ) as meta_file:
json.dump(__lowerCamelCase , __lowerCamelCase )
return cache_path
def lowerCamelCase__ ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any]=None ):
__UpperCAmelCase : Tuple = url.encode("""utf-8""" )
__UpperCAmelCase : Optional[Any] = shaaaa(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = url_hash.hexdigest()
if etag:
__UpperCAmelCase : int = etag.encode("""utf-8""" )
__UpperCAmelCase : List[str] = shaaaa(__lowerCamelCase )
filename += "." + etag_hash.hexdigest()
if url.endswith(""".h5""" ):
filename += ".h5"
return filename
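# Hedged note on the scheme above: the cache filename is the sha256 hex digest
# of the URL plus, when an ETag is available, "." + sha256(etag), so a changed
# ETag produces a fresh cache entry while an unchanged URL/ETag pair is always
# re-found on disk.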
def lowerCamelCase__ ( __lowerCamelCase : Any , __lowerCamelCase : int=None , __lowerCamelCase : int=False , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Tuple=False , __lowerCamelCase : str=None , __lowerCamelCase : List[Any]=False , __lowerCamelCase : Tuple=False , __lowerCamelCase : str=False , ):
if cache_dir is None:
__UpperCAmelCase : List[str] = TRANSFORMERS_CACHE
if isinstance(__lowerCamelCase , __lowerCamelCase ):
__UpperCAmelCase : Any = str(__lowerCamelCase )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
__UpperCAmelCase : Tuple = str(__lowerCamelCase )
if is_remote_url(__lowerCamelCase ):
# URL, so get it from the cache (downloading if necessary)
__UpperCAmelCase : Tuple = get_from_cache(
__lowerCamelCase , cache_dir=__lowerCamelCase , force_download=__lowerCamelCase , proxies=__lowerCamelCase , resume_download=__lowerCamelCase , user_agent=__lowerCamelCase , local_files_only=__lowerCamelCase , )
elif os.path.exists(__lowerCamelCase ):
# File, and it exists.
__UpperCAmelCase : Tuple = url_or_filename
elif urlparse(__lowerCamelCase ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError("""file {} not found""".format(__lowerCamelCase ) )
else:
# Something unknown
raise ValueError("""unable to parse {} as a URL or as a local path""".format(__lowerCamelCase ) )
if extract_compressed_file:
if not is_zipfile(__lowerCamelCase ) and not tarfile.is_tarfile(__lowerCamelCase ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
__UpperCAmelCase , __UpperCAmelCase : int = os.path.split(__lowerCamelCase )
__UpperCAmelCase : Any = output_file.replace(""".""" , """-""" ) + """-extracted"""
__UpperCAmelCase : List[str] = os.path.join(__lowerCamelCase , __lowerCamelCase )
if os.path.isdir(__lowerCamelCase ) and os.listdir(__lowerCamelCase ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
__UpperCAmelCase : str = output_path + """.lock"""
with FileLock(__lowerCamelCase ):
shutil.rmtree(__lowerCamelCase , ignore_errors=__lowerCamelCase )
os.makedirs(__lowerCamelCase )
if is_zipfile(__lowerCamelCase ):
with ZipFile(__lowerCamelCase , """r""" ) as zip_file:
zip_file.extractall(__lowerCamelCase )
zip_file.close()
elif tarfile.is_tarfile(__lowerCamelCase ):
__UpperCAmelCase : Any = tarfile.open(__lowerCamelCase )
tar_file.extractall(__lowerCamelCase )
tar_file.close()
else:
raise EnvironmentError("""Archive format of {} could not be identified""".format(__lowerCamelCase ) )
return output_path_extracted
return output_path
def lowerCamelCase__ ( __lowerCamelCase : List[Any] , __lowerCamelCase : int="," ):
assert isinstance(__lowerCamelCase , __lowerCamelCase )
if os.path.isfile(__lowerCamelCase ):
with open(__lowerCamelCase ) as f:
__UpperCAmelCase : List[Any] = eval(f.read() )
else:
__UpperCAmelCase : List[str] = requests.get(__lowerCamelCase )
try:
__UpperCAmelCase : int = requests.json()
except Exception:
__UpperCAmelCase : List[Any] = req.content.decode()
assert data is not None, "could not connect"
try:
__UpperCAmelCase : str = eval(__lowerCamelCase )
except Exception:
__UpperCAmelCase : List[Any] = data.split("""\n""" )
req.close()
return data
def lowerCamelCase__ ( __lowerCamelCase : Any ):
__UpperCAmelCase : Optional[int] = requests.get(__lowerCamelCase )
__UpperCAmelCase : List[Any] = np.array(Image.open(BytesIO(response.content ) ) )
return img
def lowerCamelCase__ ( __lowerCamelCase : str ):
__UpperCAmelCase : int = url.split("""/""" )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(__lowerCamelCase )
with open(__lowerCamelCase , """rb""" ) as stream:
__UpperCAmelCase : List[str] = pkl.load(__lowerCamelCase )
__UpperCAmelCase : Dict = weights.pop("""model""" )
__UpperCAmelCase : Union[str, Any] = {}
for k, v in model.items():
__UpperCAmelCase : int = torch.from_numpy(__lowerCamelCase )
if "running_var" in k:
__UpperCAmelCase : Optional[int] = torch.tensor([0] )
__UpperCAmelCase : Tuple = k.replace("""running_var""" , """num_batches_tracked""" )
__UpperCAmelCase : Any = zero
return new
def lowerCamelCase__ ( ):
print(f"""{os.path.abspath(os.path.join(__lowerCamelCase , os.pardir ) )}/demo.ipynb""" )
def lowerCamelCase__ ( __lowerCamelCase : Any , __lowerCamelCase : List[Any]="RGB" ):
assert isinstance(__lowerCamelCase , __lowerCamelCase )
if os.path.isfile(__lowerCamelCase ):
__UpperCAmelCase : List[str] = cva.imread(__lowerCamelCase )
else:
__UpperCAmelCase : int = get_image_from_url(__lowerCamelCase )
assert img is not None, f"""could not connect to: {im}"""
__UpperCAmelCase : Any = cva.cvtColor(__lowerCamelCase , cva.COLOR_BGR2RGB )
if input_format == "RGB":
__UpperCAmelCase : Optional[int] = img[:, :, ::-1]
return img
def lowerCamelCase__ ( __lowerCamelCase : Optional[int] , __lowerCamelCase : int=1 ):
return (images[i : i + batch] for i in range(0 , len(__lowerCamelCase ) , __lowerCamelCase ))
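# Hedged note on the batching generator above: it yields successive slices of
# the input sequence, so the final slice may be shorter than the batch size;
# e.g. five images with batch=2 come out as slices [0:2], [2:4], [4:5].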
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def __UpperCAmelCase ( snake_case_ : List[str] , snake_case_ : List[Any]=False ):
'''simple docstring'''
try:
UpperCAmelCase: Optional[int] = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
UpperCAmelCase: List[Any] = default
else:
# KEY is set, convert it to True or False.
try:
UpperCAmelCase: Optional[int] = strtobool(snake_case_ )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F'If set, {key} must be yes or no.' )
return _value
_snake_case : Dict = parse_flag_from_env('RUN_SLOW', default=False)
_snake_case : int = parse_flag_from_env('RUN_REMOTE', default=False)
_snake_case : List[Any] = parse_flag_from_env('RUN_LOCAL', default=True)
_snake_case : List[str] = parse_flag_from_env('RUN_PACKAGED', default=True)
# Compression
_snake_case : Union[str, Any] = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4')
_snake_case : str = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr')
_snake_case : str = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard')
# Audio
_snake_case : Dict = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'),
reason='test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ',
)
# Beam
_snake_case : Optional[int] = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'),
reason='test requires apache-beam and a compatible dill version',
)
# Dill-cloudpickle compatibility
_snake_case : int = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('0.3.2'),
reason='test requires dill>0.3.2 for cloudpickle compatibility',
)
# Windows
_snake_case : Union[str, Any] = pytest.mark.skipif(
sys.platform == 'win32',
reason='test should not be run on Windows',
)
def __UpperCAmelCase ( snake_case_ : List[Any] ):
'''simple docstring'''
try:
import faiss # noqa
except ImportError:
UpperCAmelCase: Any = unittest.skip("test requires faiss" )(snake_case_ )
return test_case
def __UpperCAmelCase ( snake_case_ : Optional[int] ):
'''simple docstring'''
try:
import regex # noqa
except ImportError:
UpperCAmelCase: Dict = unittest.skip("test requires regex" )(snake_case_ )
return test_case
def __UpperCAmelCase ( snake_case_ : Optional[int] ):
'''simple docstring'''
try:
import elasticsearch # noqa
except ImportError:
UpperCAmelCase: Tuple = unittest.skip("test requires elasticsearch" )(snake_case_ )
return test_case
def __UpperCAmelCase ( snake_case_ : int ):
'''simple docstring'''
try:
import sqlalchemy # noqa
except ImportError:
UpperCAmelCase: Optional[int] = unittest.skip("test requires sqlalchemy" )(snake_case_ )
return test_case
def __UpperCAmelCase ( snake_case_ : Optional[int] ):
'''simple docstring'''
if not config.TORCH_AVAILABLE:
UpperCAmelCase: List[Any] = unittest.skip("test requires PyTorch" )(snake_case_ )
return test_case
def __UpperCAmelCase ( snake_case_ : Any ):
'''simple docstring'''
if not config.TF_AVAILABLE:
UpperCAmelCase: int = unittest.skip("test requires TensorFlow" )(snake_case_ )
return test_case
def __UpperCAmelCase ( snake_case_ : Optional[int] ):
'''simple docstring'''
if not config.JAX_AVAILABLE:
UpperCAmelCase: Union[str, Any] = unittest.skip("test requires JAX" )(snake_case_ )
return test_case
def __UpperCAmelCase ( snake_case_ : str ):
'''simple docstring'''
if not config.PIL_AVAILABLE:
UpperCAmelCase: int = unittest.skip("test requires Pillow" )(snake_case_ )
return test_case
def __UpperCAmelCase ( snake_case_ : Union[str, Any] ):
'''simple docstring'''
try:
import transformers # noqa F401
except ImportError:
return unittest.skip("test requires transformers" )(snake_case_ )
else:
return test_case
def __UpperCAmelCase ( snake_case_ : str ):
'''simple docstring'''
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip("test requires tiktoken" )(snake_case_ )
else:
return test_case
def __UpperCAmelCase ( snake_case_ : str ):
'''simple docstring'''
try:
import spacy # noqa F401
except ImportError:
return unittest.skip("test requires spacy" )(snake_case_ )
else:
return test_case
def __UpperCAmelCase ( snake_case_ : Tuple ):
'''simple docstring'''
def _require_spacy_model(snake_case_ : Tuple ):
try:
import spacy # noqa F401
spacy.load(snake_case_ )
except ImportError:
return unittest.skip("test requires spacy" )(snake_case_ )
except OSError:
return unittest.skip("test requires spacy model '{}'".format(snake_case_ ) )(snake_case_ )
else:
return test_case
return _require_spacy_model
def __UpperCAmelCase ( snake_case_ : str ):
'''simple docstring'''
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip("test requires pyspark" )(snake_case_ )
else:
return test_case
def __UpperCAmelCase ( snake_case_ : List[str] ):
'''simple docstring'''
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip("test requires joblibspark" )(snake_case_ )
else:
return test_case
def __UpperCAmelCase ( snake_case_ : Dict ):
'''simple docstring'''
if not _run_slow_tests or _run_slow_tests == 0:
UpperCAmelCase: Union[str, Any] = unittest.skip("test is slow" )(snake_case_ )
return test_case
def __UpperCAmelCase ( snake_case_ : List[Any] ):
'''simple docstring'''
if not _run_local_tests or _run_local_tests == 0:
UpperCAmelCase: Dict = unittest.skip("test is local" )(snake_case_ )
return test_case
def __UpperCAmelCase ( snake_case_ : Union[str, Any] ):
'''simple docstring'''
if not _run_packaged_tests or _run_packaged_tests == 0:
UpperCAmelCase: Optional[Any] = unittest.skip("test is packaged" )(snake_case_ )
return test_case
def __UpperCAmelCase ( snake_case_ : Any ):
'''simple docstring'''
if not _run_remote_tests or _run_remote_tests == 0:
UpperCAmelCase: Optional[Any] = unittest.skip("test requires remote" )(snake_case_ )
return test_case
def __UpperCAmelCase ( *snake_case_ : List[str] ):
'''simple docstring'''
def decorate(cls : Optional[Any] ):
for name, fn in cls.__dict__.items():
if callable(snake_case_ ) and name.startswith("test" ):
for decorator in decorators:
UpperCAmelCase: Optional[Any] = decorator(snake_case_ )
setattr(cls , snake_case_ , snake_case_ )
return cls
return decorate
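# Hedged usage sketch for the class-decorator factory above (its original name
# is not recoverable from the obfuscated text, so it is shown symbolically):
#
#     @for_all_test_methods(require_faiss, slow)   # assumed original names
#     class IndexTests(unittest.TestCase):
#         def test_add(self): ...
#
# Every method whose name starts with "test" gets wrapped by each decorator.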
class __lowerCamelCase ( lowercase ):
pass
class __lowerCamelCase ( lowercase ):
lowerCamelCase__: str = 0
lowerCamelCase__: Dict = 1
lowerCamelCase__: List[str] = 2
@contextmanager
def __UpperCAmelCase ( mode : Tuple=OfflineSimulationMode.CONNECTION_FAILS , timeout : Tuple=1e-16 ):
    '''simple docstring'''
    online_request = requests.Session().request
    def timeout_request(session : Tuple , method : Dict , url : int , **kwargs : Optional[Any] ):
        # Change the url to an invalid url so that the connection hangs
        url = "https://10.255.255.1"
        if kwargs.get("timeout" ) is None:
            raise RequestWouldHangIndefinitelyError(
                F'Tried a call to {url} in offline mode with no timeout set. Please set a timeout.' )
        kwargs["timeout"] = timeout
        try:
            return online_request(method , url , **kwargs )
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1" , F'OfflineMock[{url}]' ),)
            e.args = (max_retry_error,)
            raise
    def raise_connection_error(session : Tuple , prepared_request : Any , **kwargs : Tuple ):
        raise requests.ConnectionError("Offline mode is enabled." , request=prepared_request )
    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send" , raise_connection_error ):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request" , timeout_request ):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE" , True ):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum." )
@contextmanager
def __UpperCAmelCase ( *snake_case_ : Optional[Any] , **snake_case_a : List[str] ):
    '''simple docstring'''
    original_working_dir = str(Path().resolve() )
    with tempfile.TemporaryDirectory(*snake_case_ , **snake_case_a ) as tmp_dir:
        try:
            os.chdir(tmp_dir )
            yield
        finally:
            os.chdir(original_working_dir )
@contextmanager
def __UpperCAmelCase ( ):
'''simple docstring'''
import gc
gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def __UpperCAmelCase ( ):
'''simple docstring'''
import gc
gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
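# Usage sketch (assuming the two context managers above are exposed as
# `assert_arrow_memory_increases` / `assert_arrow_memory_doesnt_increase`,
# which is an assumption about the public names):
#
#   with assert_arrow_memory_increases():
#       table = pa.table({"col": list(range(100_000))})  # keeps Arrow memory alive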
def __UpperCAmelCase ( rng_a : Optional[int] , rng_b : List[Any] ):
    '''simple docstring'''
    return deepcopy(rng_a ).integers(0 , 1_0_0 , 1_0 ).tolist() == deepcopy(rng_b ).integers(0 , 1_0_0 , 1_0 ).tolist()
def __UpperCAmelCase ( snake_case_ : Optional[int] ):
    '''simple docstring'''
    import decorator
    from requests.exceptions import HTTPError
    def _wrapper(func : Any , *args : Union[str, Any] , **kwargs : Tuple ):
        try:
            return func(*args , **kwargs )
        except HTTPError as err:
            if str(err ).startswith("500" ) or str(err ).startswith("502" ):
                pytest.xfail(str(err ) )
            raise err
    return decorator.decorator(_wrapper , snake_case_ )
class __lowerCamelCase :
    def __init__( self , returncode , stdout , stderr ) -> List[Any]:
        """simple docstring"""
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream ( stream : List[str] , callback : str ):
    '''simple docstring'''
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def _stream_subprocess ( cmd : List[str] , env : List[str]=None , stdin : Any=None , timeout : Optional[Any]=None , quiet : List[Any]=False , echo : int=False ):
    '''simple docstring'''
    if echo:
        print("\nRunning: " , " ".join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line : int , sink : str , pipe : str , label : str="" ):
        # decode each raw line, collect it, and optionally mirror it to the console
        line = line.decode("utf-8" ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout , lambda line : tee(line , out , sys.stdout , label="stdout:" ) ),
            _read_stream(p.stderr , lambda line : tee(line , err , sys.stderr , label="stderr:" ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def __UpperCAmelCase ( cmd : str , env : Tuple=None , stdin : Optional[int]=None , timeout : Any=1_8_0 , quiet : List[Any]=False , echo : List[str]=True ):
    '''simple docstring'''
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )
    cmd_str = " ".join(cmd )
    if result.returncode > 0:
        stderr = "\n".join(result.stderr )
        raise RuntimeError(
            F'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
            F'The combined stderr from workers follows:\n{stderr}' )
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(F'\'{cmd_str}\' produced no output.' )
    return result
def pytest_xdist_worker_id ( ):
    '''simple docstring'''
    worker = os.environ.get("PYTEST_XDIST_WORKER" , "gw0" )
    worker = re.sub(r"^gw" , "" , worker , 0 , re.M )
    return int(worker )
def __UpperCAmelCase ( ):
    '''simple docstring'''
    port = 2_9_5_0_0
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
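# Worked example: pytest-xdist names its workers "gw0", "gw1", ...; worker
# "gw3" therefore gets port 29_500 + 3 = 29_503, so concurrent
# torch.distributed tests on one host don't collide.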
| 716
|
def solution ( snake_case_ : int = 6_0_0_8_5_1_4_7_5_1_4_3 ):
    '''simple docstring'''
    try:
        n = int(snake_case_ )
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int." )
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one." )
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        # advance i to the next factor of n (trial division)
        while n % i != 0:
            i += 1
        ans = i
        # strip all copies of this factor from n
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 166
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_A : int = {"""configuration_glpn""": ["""GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GLPNConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Tuple = ["""GLPNFeatureExtractor"""]
_A : List[str] = ["""GLPNImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_glpn"""] = [
        """GLPN_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """GLPNForDepthEstimation""",
        """GLPNLayer""",
        """GLPNModel""",
        """GLPNPreTrainedModel""",
    ]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
_A : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 100
|
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 100
| 1
|
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester :
    '''simple docstring'''
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , mask_ratio=0.6 , scope=None , ) -> Dict:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
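        # Worked example with the defaults above: image_size=30, patch_size=2 gives
        # num_patches = (30 // 2) ** 2 = 225, and with mask_ratio=0.6 the unmasked
        # sequence length is ceil(0.4 * 226) = 91 tokens (including [CLS]).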
    def prepare_config_and_inputs( self ) -> Dict:
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ) -> Any:
        return ViTMAEConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
    def create_and_check_model( self , config , pixel_values , labels ) -> List[str]:
        model = TFViTMAEModel(config=config )
        result = model(pixel_values , training=False )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_pretraining( self , config , pixel_values , labels ) -> int:
        model = TFViTMAEForPreTraining(config )
        result = model(pixel_values , training=False )
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values , training=False )
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
    def prepare_config_and_inputs_for_common( self ) -> List[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (config , pixel_values , labels) = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_tf
class a__ ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
lowercase__ : str = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
lowercase__ : Any = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}
lowercase__ : str = False
lowercase__ : Union[str, Any] = False
lowercase__ : int = False
lowercase__ : Union[str, Any] = False
    def setUp( self ) -> int:
        self.model_tester = TFViTMAEModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTMAEConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ) -> Optional[Any]:
        self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
    def test_inputs_embeds( self ) -> List[str]:
        pass
    def test_model_common_attributes( self ) -> str:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , tf.keras.layers.Layer ) )
    def test_forward_signature( self ) -> str:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ) -> Union[str, Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_pretraining( self ) -> Optional[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
    def test_keyword_and_dict_args( self ) -> int:
        # make the mask reproducible
        np.random.seed(2 )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2 )
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        for model_class in self.all_model_classes:
            model = model_class(config )
            inputs = self._prepare_for_class(inputs_dict , model_class )
            outputs_dict = model(inputs , noise=noise )
            inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict , model_class ) )
            outputs_keywords = model(**inputs_keywords , noise=noise )
            output_dict = outputs_dict[0].numpy()
            output_keywords = outputs_keywords[0].numpy()
            self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1e-6 )
    def test_numpy_arrays_inputs( self ) -> Optional[int]:
        # make the mask reproducible
        np.random.seed(2 )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2 )
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        def prepare_numpy_arrays(inputs_dict ):
            inputs_np_dict = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(v ):
                    inputs_np_dict[k] = v.numpy()
                else:
                    inputs_np_dict[k] = np.array(v )
            return inputs_np_dict
        for model_class in self.all_model_classes:
            model = model_class(config )
            inputs = self._prepare_for_class(inputs_dict , model_class )
            inputs_np = prepare_numpy_arrays(inputs )
            output_for_dict_input = model(inputs_np , noise=noise )
            output_for_kw_input = model(**inputs , noise=noise )
            self.assert_outputs_same(output_for_dict_input , output_for_kw_input )
    def check_pt_tf_models( self , tf_model , pt_model , tf_inputs_dict ) -> Union[str, Any]:
        # make masks reproducible
        np.random.seed(2 )
        num_patches = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        tf_noise = tf.constant(noise )
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        tf_inputs_dict["noise"] = tf_noise
        super().check_pt_tf_models(tf_model , pt_model , tf_inputs_dict )
    def test_keras_save_load( self ) -> Dict:
        # make mask reproducible
        np.random.seed(2 )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        tf_main_layer_classes = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__ ),)
            for module_member_name in dir(module )
            if module_member_name.endswith('''MainLayer''' )
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )]
            for module_member in (getattr(module , module_member_name ),)
            if isinstance(module_member , type )
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member , '''_keras_serializable''' , False )
        }
        num_patches = int((config.image_size // config.patch_size) ** 2 )
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        noise = tf.convert_to_tensor(noise )
        inputs_dict.update({'''noise''': noise} )
        for main_layer_class in tf_main_layer_classes:
            main_layer = main_layer_class(config )
            symbolic_inputs = {
                name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
            }
            model = tf.keras.Model(symbolic_inputs , outputs=main_layer(symbolic_inputs ) )
            outputs = model(inputs_dict )
            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname , '''keras_model.h5''' )
                model.save(filepath )
                model = tf.keras.models.load_model(
                    filepath , custom_objects={main_layer_class.__name__: main_layer_class} )
                assert isinstance(model , tf.keras.Model )
                after_outputs = model(inputs_dict )
                self.assert_outputs_same(after_outputs , outputs )
@slow
    def test_save_load( self ) -> Tuple:
        # make mask reproducible
        np.random.seed(2 )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2 )
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        for model_class in self.all_model_classes:
            model = model_class(config )
            model_input = self._prepare_for_class(inputs_dict , model_class )
            outputs = model(model_input , noise=noise )
            if model_class.__name__ == "TFViTMAEModel":
                out_2 = outputs.last_hidden_state.numpy()
                out_2[np.isnan(out_2 )] = 0
            else:
                out_2 = outputs.logits.numpy()
                out_2[np.isnan(out_2 )] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname , saved_model=False )
                model = model_class.from_pretrained(tmpdirname )
                after_outputs = model(model_input , noise=noise )
                if model_class.__name__ == "TFViTMAEModel":
                    out_1 = after_outputs['''last_hidden_state'''].numpy()
                    out_1[np.isnan(out_1 )] = 0
                else:
                    out_1 = after_outputs['''logits'''].numpy()
                    out_1[np.isnan(out_1 )] = 0
                max_diff = np.amax(np.abs(out_1 - out_2 ) )
                self.assertLessEqual(max_diff , 1e-5 )
    def test_save_load_config( self ) -> int:
        # make mask reproducible
        np.random.seed(2 )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2 )
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        for model_class in self.all_model_classes:
            model = model_class(config )
            model_input = self._prepare_for_class(inputs_dict , model_class )
            outputs = model(model_input , noise=noise )
            model_config = model.get_config()
            # make sure that returned config is jsonifiable, which is required by keras
            json.dumps(model_config )
            new_model = model_class.from_config(model.get_config() )
            # make sure it also accepts a normal config
            new_model = model_class.from_config(model.config )
            _ = new_model(model_input )  # Build model
            new_model.set_weights(model.get_weights() )
            after_outputs = new_model(model_input , noise=noise )
            self.assert_outputs_same(after_outputs , outputs )
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
pass
@slow
    def test_model_from_pretrained( self ) -> Dict:
        model = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' )
        self.assertIsNotNone(model )
def prepare_img ( ) -> Optional[int]:
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_tf
@require_vision
class a__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __SCREAMING_SNAKE_CASE ( self ) -> str:
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
    def test_inference_for_pretraining( self ) -> Tuple:
        # make random mask reproducible across the PT and TF model
        np.random.seed(2 )
        model = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''tf''' )
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
        noise = np.random.uniform(size=(1, num_patches) )
        # forward pass
        outputs = model(**inputs , noise=noise )
        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 1_96, 7_68] )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.convert_to_tensor(
            [[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] )
        tf.debugging.assert_near(outputs.logits[0, :3, :3] , expected_slice , atol=1e-4 )
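        # Shape check: a 224x224 input with 16x16 patches yields (224 // 16) ** 2 = 196
        # patches, and each decoder logit reconstructs one 16 * 16 * 3 = 768-value
        # patch, hence the (1, 196, 768) logits asserted above.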
| 98
|
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='''session''' )
def _snake_case ( ) -> Any:
    n = 10
    features = datasets.Features(
        {
            '''tokens''': datasets.Sequence(datasets.Value('''string''' ) ),
            '''labels''': datasets.Sequence(datasets.ClassLabel(names=['''negative''', '''positive'''] ) ),
            '''answers''': datasets.Sequence(
                {
                    '''text''': datasets.Value('''string''' ),
                    '''answer_start''': datasets.Value('''int32''' ),
                } ),
            '''id''': datasets.Value('''int64''' ),
        } )
    dataset = datasets.Dataset.from_dict(
        {
            '''tokens''': [['''foo'''] * 5] * n,
            '''labels''': [[1] * 5] * n,
            '''answers''': [{'''answer_start''': [97], '''text''': ['''1976''']}] * n,
            '''id''': list(range(n ) ),
        } , features=features , )
    return dataset
@pytest.fixture(scope='''session''' )
def _snake_case ( tmp_path_factory , dataset ) -> List[Any]:
    filename = str(tmp_path_factory.mktemp('''data''' ) / '''file.arrow''' )
    dataset.map(cache_file_name=filename )
    return filename
# FILE_CONTENT + files
FILE_CONTENT = '''\
Text data.
Second line of data.'''
@pytest.fixture(scope='''session''' )
def _snake_case ( tmp_path_factory ) -> Optional[int]:
    filename = tmp_path_factory.mktemp('''data''' ) / '''file.txt'''
    data = FILE_CONTENT
    with open(filename , '''w''' ) as f:
        f.write(data )
    return filename
@pytest.fixture(scope='''session''' )
def _snake_case ( tmp_path_factory ) -> Union[str, Any]:
    import bz2
    path = tmp_path_factory.mktemp('''data''' ) / '''file.txt.bz2'''
    data = bytes(FILE_CONTENT , '''utf-8''' )
    with bz2.open(path , '''wb''' ) as f:
        f.write(data )
    return path
@pytest.fixture(scope='''session''' )
def _snake_case ( tmp_path_factory ) -> Tuple:
    import gzip
    path = str(tmp_path_factory.mktemp('''data''' ) / '''file.txt.gz''' )
    data = bytes(FILE_CONTENT , '''utf-8''' )
    with gzip.open(path , '''wb''' ) as f:
        f.write(data )
    return path
@pytest.fixture(scope='''session''' )
def _snake_case ( tmp_path_factory ) -> List[str]:
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
        path = tmp_path_factory.mktemp('''data''' ) / '''file.txt.lz4'''
        data = bytes(FILE_CONTENT , '''utf-8''' )
        with lz4.frame.open(path , '''wb''' ) as f:
            f.write(data )
        return path
@pytest.fixture(scope='''session''' )
def _snake_case ( tmp_path_factory , text_file ) -> int:
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
        path = tmp_path_factory.mktemp('''data''' ) / '''file.txt.7z'''
        with py7zr.SevenZipFile(path , '''w''' ) as archive:
            archive.write(text_file , arcname=os.path.basename(text_file ) )
        return path
@pytest.fixture(scope='''session''' )
def _snake_case ( tmp_path_factory , text_file ) -> str:
    import tarfile
    path = tmp_path_factory.mktemp('''data''' ) / '''file.txt.tar'''
    with tarfile.TarFile(path , '''w''' ) as f:
        f.add(text_file , arcname=os.path.basename(text_file ) )
    return path
@pytest.fixture(scope='''session''' )
def _snake_case ( tmp_path_factory ) -> Optional[int]:
    import lzma
    path = tmp_path_factory.mktemp('''data''' ) / '''file.txt.xz'''
    data = bytes(FILE_CONTENT , '''utf-8''' )
    with lzma.open(path , '''wb''' ) as f:
        f.write(data )
    return path
@pytest.fixture(scope='''session''' )
def _snake_case ( tmp_path_factory , text_file ) -> List[str]:
    import zipfile
    path = tmp_path_factory.mktemp('''data''' ) / '''file.txt.zip'''
    with zipfile.ZipFile(path , '''w''' ) as f:
        f.write(text_file , arcname=os.path.basename(text_file ) )
    return path
@pytest.fixture(scope='''session''' )
def _snake_case ( tmp_path_factory ) -> Any:
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd
        path = tmp_path_factory.mktemp('''data''' ) / '''file.txt.zst'''
        data = bytes(FILE_CONTENT , '''utf-8''' )
        with zstd.open(path , '''wb''' ) as f:
            f.write(data )
        return path
@pytest.fixture(scope='''session''' )
def _snake_case ( tmp_path_factory ) -> Any:
    filename = tmp_path_factory.mktemp('''data''' ) / '''file.xml'''
    data = textwrap.dedent(
'''\
<?xml version="1.0" encoding="UTF-8" ?>
<tmx version="1.4">
<header segtype="sentence" srclang="ca" />
<body>
<tu>
<tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>
<tuv xml:lang="en"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>
<tuv xml:lang="en"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>
<tuv xml:lang="en"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>
<tuv xml:lang="en"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>
<tuv xml:lang="en"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>''' )
    with open(filename , '''w''' ) as f:
        f.write(data )
    return filename
DATA = [
{'''col_1''': '''0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''3''', '''col_2''': 3, '''col_3''': 3.0},
]
DATA2 = [
{'''col_1''': '''4''', '''col_2''': 4, '''col_3''': 4.0},
{'''col_1''': '''5''', '''col_2''': 5, '''col_3''': 5.0},
]
DATA_DICT_OF_LISTS = {
'''col_1''': ['''0''', '''1''', '''2''', '''3'''],
'''col_2''': [0, 1, 2, 3],
'''col_3''': [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{'''col_3''': 0.0, '''col_1''': '''0''', '''col_2''': 0},
{'''col_3''': 1.0, '''col_1''': '''1''', '''col_2''': 1},
]
DATA_STR = [
{'''col_1''': '''s0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''s1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''s2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''s3''', '''col_2''': 3, '''col_3''': 3.0},
]
@pytest.fixture(scope='''session''' )
def _snake_case ( ) -> Union[str, Any]:
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='''session''' )
def _snake_case ( tmp_path_factory , dataset_dict ) -> Optional[Any]:
    dataset = datasets.Dataset.from_dict(dataset_dict )
    path = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.arrow''' )
    dataset.map(cache_file_name=path )
    return path
@pytest.fixture(scope='''session''' )
def _snake_case ( tmp_path_factory ) -> str:
    path = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.sqlite''' )
    with contextlib.closing(sqlite3.connect(path ) ) as con:
        cur = con.cursor()
        cur.execute('''CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)''' )
        for item in DATA:
            cur.execute('''INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)''' , tuple(item.values() ) )
        con.commit()
    return path
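# Sketch: rows written by the fixture above can be read back with the standard
# library, e.g.:
#   with contextlib.closing(sqlite3.connect(path)) as con:
#       rows = con.execute('''SELECT col_1, col_2, col_3 FROM dataset''').fetchall()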
@pytest.fixture(scope='''session''' )
def _snake_case ( tmp_path_factory ) -> Any:
    path = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.csv''' )
    with open(path , '''w''' , newline='''''' ) as f:
        writer = csv.DictWriter(f , fieldnames=['''col_1''', '''col_2''', '''col_3'''] )
        writer.writeheader()
        for item in DATA:
            writer.writerow(item )
    return path
@pytest.fixture(scope='''session''' )
def _snake_case ( tmp_path_factory ) -> Optional[int]:
    path = str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.csv''' )
    with open(path , '''w''' , newline='''''' ) as f:
        writer = csv.DictWriter(f , fieldnames=['''col_1''', '''col_2''', '''col_3'''] )
        writer.writeheader()
        for item in DATA:
            writer.writerow(item )
    return path
@pytest.fixture(scope='''session''' )
def _snake_case ( csv_path , tmp_path_factory ) -> Optional[int]:
    import bz2
    path = tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.bz2'''
    with open(csv_path , '''rb''' ) as f:
        data = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path , '''wb''' ) as f:
        f.write(data )
    return path
@pytest.fixture(scope='''session''' )
def _snake_case ( csv_path , csva_path , tmp_path_factory ) -> List[str]:
    path = tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.zip'''
    with zipfile.ZipFile(path , '''w''' ) as f:
        f.write(csv_path , arcname=os.path.basename(csv_path ) )
        f.write(csva_path , arcname=os.path.basename(csva_path ) )
    return path
@pytest.fixture(scope='''session''' )
def _snake_case ( csv_path , csva_path , tmp_path_factory ) -> Dict:
    path = tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.zip'''
    with zipfile.ZipFile(path , '''w''' ) as f:
        f.write(csv_path , arcname=os.path.basename(csv_path.replace('''.csv''' , '''.CSV''' ) ) )
        f.write(csva_path , arcname=os.path.basename(csva_path.replace('''.csv''' , '''.CSV''' ) ) )
    return path
@pytest.fixture(scope='''session''' )
def _snake_case ( csv_path , csva_path , tmp_path_factory ) -> Optional[int]:
    path = tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.csv.zip'''
    with zipfile.ZipFile(path , '''w''' ) as f:
        f.write(csv_path , arcname=os.path.join('''main_dir''' , os.path.basename(csv_path ) ) )
        f.write(csva_path , arcname=os.path.join('''main_dir''' , os.path.basename(csva_path ) ) )
    return path
@pytest.fixture(scope='''session''' )
def _snake_case ( tmp_path_factory ) -> str:
    path = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.parquet''' )
    schema = pa.schema(
        {
            '''col_1''': pa.string(),
            '''col_2''': pa.int64(),
            '''col_3''': pa.float64(),
        } )
    with open(path , '''wb''' ) as f:
        writer = pq.ParquetWriter(f , schema=schema )
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA ) )] for k in DATA[0]} , schema=schema )
        writer.write_table(pa_table )
        writer.close()
    return path
@pytest.fixture(scope='''session''' )
def _snake_case ( tmp_path_factory ) -> str:
    path = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.json''' )
    data = {'''data''': DATA}
    with open(path , '''w''' ) as f:
        json.dump(data , f )
    return path
@pytest.fixture(scope='''session''' )
def _snake_case ( tmp_path_factory ) -> Union[str, Any]:
    path = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.json''' )
    data = {'''data''': DATA_DICT_OF_LISTS}
    with open(path , '''w''' ) as f:
        json.dump(data , f )
    return path
@pytest.fixture(scope='''session''' )
def _snake_case ( tmp_path_factory ) -> str:
    path = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl''' )
    with open(path , '''w''' ) as f:
        for item in DATA:
            f.write(json.dumps(item ) + '''\n''' )
    return path
@pytest.fixture(scope='''session''' )
def _snake_case ( tmp_path_factory ) -> Optional[Any]:
    path = str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.jsonl''' )
    with open(path , '''w''' ) as f:
        for item in DATA:
            f.write(json.dumps(item ) + '''\n''' )
    return path
@pytest.fixture(scope='''session''' )
def _snake_case ( tmp_path_factory ) -> List[str]:
    path = str(tmp_path_factory.mktemp('''data''' ) / '''dataset_312.jsonl''' )
    with open(path , '''w''' ) as f:
        for item in DATA_312:
            f.write(json.dumps(item ) + '''\n''' )
    return path
@pytest.fixture(scope='''session''' )
def _snake_case ( tmp_path_factory ) -> List[Any]:
    path = str(tmp_path_factory.mktemp('''data''' ) / '''dataset-str.jsonl''' )
    with open(path , '''w''' ) as f:
        for item in DATA_STR:
            f.write(json.dumps(item ) + '''\n''' )
    return path
@pytest.fixture(scope='''session''' )
def _snake_case ( tmp_path_factory , text_path ) -> List[str]:
    import gzip
    path = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.txt.gz''' )
    with open(text_path , '''rb''' ) as orig_file:
        with gzip.open(path , '''wb''' ) as zipped_file:
            zipped_file.writelines(orig_file )
    return path
@pytest.fixture(scope='''session''' )
def _snake_case ( tmp_path_factory , jsonl_path ) -> Dict:
    import gzip
    path = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.gz''' )
    with open(jsonl_path , '''rb''' ) as orig_file:
        with gzip.open(path , '''wb''' ) as zipped_file:
            zipped_file.writelines(orig_file )
    return path
@pytest.fixture(scope='''session''' )
def _snake_case ( jsonl_path , jsonla_path , tmp_path_factory ) -> str:
    path = tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.zip'''
    with zipfile.ZipFile(path , '''w''' ) as f:
        f.write(jsonl_path , arcname=os.path.basename(jsonl_path ) )
        f.write(jsonla_path , arcname=os.path.basename(jsonla_path ) )
    return path
@pytest.fixture(scope='''session''' )
def _snake_case ( zip_jsonl_path , jsonl_path , jsonla_path , tmp_path_factory ) -> Optional[Any]:
    path = tmp_path_factory.mktemp('''data''' ) / '''dataset_nested.jsonl.zip'''
    with zipfile.ZipFile(path , '''w''' ) as f:
        f.write(zip_jsonl_path , arcname=os.path.join('''nested''' , os.path.basename(zip_jsonl_path ) ) )
    return path
@pytest.fixture(scope='''session''' )
def _snake_case ( jsonl_path , jsonla_path , tmp_path_factory ) -> List[Any]:
    path = tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.jsonl.zip'''
    with zipfile.ZipFile(path , '''w''' ) as f:
        f.write(jsonl_path , arcname=os.path.join('''main_dir''' , os.path.basename(jsonl_path ) ) )
        f.write(jsonla_path , arcname=os.path.join('''main_dir''' , os.path.basename(jsonla_path ) ) )
    return path
@pytest.fixture(scope='''session''' )
def _snake_case ( jsonl_path , jsonla_path , tmp_path_factory ) -> List[str]:
    path = tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.tar'''
    with tarfile.TarFile(path , '''w''' ) as f:
        f.add(jsonl_path , arcname=os.path.basename(jsonl_path ) )
        f.add(jsonla_path , arcname=os.path.basename(jsonla_path ) )
    return path
@pytest.fixture(scope='''session''' )
def _snake_case ( tar_jsonl_path , jsonl_path , jsonla_path , tmp_path_factory ) -> Optional[int]:
    path = tmp_path_factory.mktemp('''data''' ) / '''dataset_nested.jsonl.tar'''
    with tarfile.TarFile(path , '''w''' ) as f:
        f.add(tar_jsonl_path , arcname=os.path.join('''nested''' , os.path.basename(tar_jsonl_path ) ) )
    return path
@pytest.fixture(scope='''session''' )
def _snake_case ( tmp_path_factory ) -> Any:
    data = ['''0''', '''1''', '''2''', '''3''']
    path = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.txt''' )
    with open(path , '''w''' ) as f:
        for item in data:
            f.write(item + '''\n''' )
    return path
@pytest.fixture(scope='''session''' )
def _snake_case ( tmp_path_factory ) -> Union[str, Any]:
    data = ['''0''', '''1''', '''2''', '''3''']
    path = str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.txt''' )
    with open(path , '''w''' ) as f:
        for item in data:
            f.write(item + '''\n''' )
    return path
@pytest.fixture(scope='''session''' )
def _snake_case ( tmp_path_factory ) -> Union[str, Any]:
    data = ['''0''', '''1''', '''2''', '''3''']
    path = tmp_path_factory.mktemp('''data''' ) / '''dataset.abc'''
    with open(path , '''w''' ) as f:
        for item in data:
            f.write(item + '''\n''' )
    return path
@pytest.fixture(scope='''session''' )
def _snake_case ( text_path , texta_path , tmp_path_factory ) -> List[Any]:
    path = tmp_path_factory.mktemp('''data''' ) / '''dataset.text.zip'''
    with zipfile.ZipFile(path , '''w''' ) as f:
        f.write(text_path , arcname=os.path.basename(text_path ) )
        f.write(texta_path , arcname=os.path.basename(texta_path ) )
    return path
@pytest.fixture(scope='''session''' )
def _snake_case ( text_path , texta_path , tmp_path_factory ) -> Union[str, Any]:
    path = tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.text.zip'''
    with zipfile.ZipFile(path , '''w''' ) as f:
        f.write(text_path , arcname=os.path.join('''main_dir''' , os.path.basename(text_path ) ) )
        f.write(texta_path , arcname=os.path.join('''main_dir''' , os.path.basename(texta_path ) ) )
    return path
@pytest.fixture(scope='''session''' )
def _snake_case ( text_path , texta_path , tmp_path_factory ) -> Optional[int]:
    path = tmp_path_factory.mktemp('''data''' ) / '''dataset.ext.zip'''
    with zipfile.ZipFile(path , '''w''' ) as f:
        f.write(text_path , arcname=os.path.basename('''unsupported.ext''' ) )
        f.write(texta_path , arcname=os.path.basename('''unsupported_2.ext''' ) )
    return path
@pytest.fixture(scope='''session''' )
def _snake_case ( tmp_path_factory ) -> Tuple:
    text = '''\n'''.join(['''First''', '''Second\u2029with Unicode new line''', '''Third'''] )
    path = str(tmp_path_factory.mktemp('''data''' ) / '''dataset_with_unicode_new_lines.txt''' )
    with open(path , '''w''' , encoding='''utf-8''' ) as f:
        f.write(text )
    return path
@pytest.fixture(scope='''session''' )
def _snake_case ( ) -> List[str]:
return os.path.join('''tests''' , '''features''' , '''data''' , '''test_image_rgb.jpg''' )
@pytest.fixture(scope='''session''' )
def _snake_case ( ) -> Any:
return os.path.join('''tests''' , '''features''' , '''data''' , '''test_audio_44100.wav''' )
@pytest.fixture(scope='''session''' )
def _snake_case ( image_file , tmp_path_factory ) -> Union[str, Any]:
    path = tmp_path_factory.mktemp('''data''' ) / '''dataset.img.zip'''
    with zipfile.ZipFile(path , '''w''' ) as f:
        f.write(image_file , arcname=os.path.basename(image_file ) )
        f.write(image_file , arcname=os.path.basename(image_file ).replace('''.jpg''' , '''2.jpg''' ) )
    return path
@pytest.fixture(scope='''session''' )
def _snake_case ( tmp_path_factory ) -> int:
    data_dir = tmp_path_factory.mktemp('''data_dir''' )
(data_dir / "subdir").mkdir()
with open(data_dir / '''subdir''' / '''train.txt''' , '''w''' ) as f:
f.write('''foo\n''' * 10 )
with open(data_dir / '''subdir''' / '''test.txt''' , '''w''' ) as f:
f.write('''bar\n''' * 10 )
# hidden file
with open(data_dir / '''subdir''' / '''.test.txt''' , '''w''' ) as f:
f.write('''bar\n''' * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '''.subdir''' / '''train.txt''' , '''w''' ) as f:
f.write('''foo\n''' * 10 )
with open(data_dir / '''.subdir''' / '''test.txt''' , '''w''' ) as f:
f.write('''bar\n''' * 10 )
return data_dir
| 98
| 1
|
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
__UpperCAmelCase = [
'''cross_validation.py''',
'''gradient_accumulation.py''',
'''local_sgd.py''',
'''multi_process_metrics.py''',
'''memory.py''',
'''automatic_gradient_accumulation.py''',
'''fsdp_with_peak_mem_tracking.py''',
'''deepspeed_with_config_support.py''',
'''megatron_lm_gpt_pretraining.py''',
]
class a__ ( unittest.TestCase ):
'''simple docstring'''
    def one_complete_example( self , complete_file_name , parser_only , secondary_filename = None , special_strings = None ) -> Any:
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) )
        examples_path = os.path.abspath('''examples''' )
        for item in os.listdir(by_feature_path ):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path , item )
                if os.path.isfile(item_path ) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name , feature_script=item , tested_section='''main()''' if parser_only else '''training_function()''' , ):
                        diff = compare_against_test(
                            os.path.join(examples_path , complete_file_name ) , item_path , secondary_filename , parser_only )
                        diff = '''\n'''.join(diff )
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string , '''''' )
                        self.assertEqual(diff , '''''' )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
        self.one_complete_example('''complete_nlp_example.py''' , True )
        self.one_complete_example('''complete_nlp_example.py''' , False )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
        cv_path = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) )
        special_strings = [
''' ''' * 16 + '''{\n\n''',
''' ''' * 20 + '''"accuracy": eval_metric["accuracy"],\n\n''',
''' ''' * 20 + '''"f1": eval_metric["f1"],\n\n''',
''' ''' * 20 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
''' ''' * 20 + '''"epoch": epoch,\n\n''',
''' ''' * 16 + '''},\n\n''',
''' ''' * 16 + '''step=epoch,\n''',
''' ''' * 12,
''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
]
        self.one_complete_example('''complete_cv_example.py''' , True , cv_path , special_strings )
        self.one_complete_example('''complete_cv_example.py''' , False , cv_path , special_strings )
@mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "1"} )
class a__ ( TempDirTestCase ):
'''simple docstring'''
lowercase__ : Optional[Any] = False
@classmethod
def __SCREAMING_SNAKE_CASE ( cls ) -> Optional[Any]:
super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir , '''default_config.yml''' )
        write_basic_config(save_location=cls.configPath )
        cls._launch_args = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def __SCREAMING_SNAKE_CASE ( cls ) -> str:
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = F"""
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
lowerCAmelCase__ = F"""
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
""".split()
lowerCAmelCase__ = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
lowerCAmelCase__ = F"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}
""".split()
        output = run_command(self._launch_args + testargs , return_stdout=True )
        self.assertNotIn('''epoch 0:''' , output )
        self.assertIn('''epoch 1:''' , output )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
lowerCAmelCase__ = F"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}
""".split()
        output = run_command(self._launch_args + testargs , return_stdout=True )
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn('''epoch 0:''' , output )
            self.assertIn('''epoch 1:''' , output )
        else:
            self.assertIn('''epoch 0:''' , output )
            self.assertIn('''epoch 1:''' , output )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
        testargs = '''
examples/by_feature/cross_validation.py
--num_folds 2
'''.split()
with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ):
            output = run_command(self._launch_args + testargs , return_stdout=True )
            results = re.findall('''({.+})''' , output )
            results = [r for r in results if '''accuracy''' in r][-1]
            results = ast.literal_eval(results )
self.assertGreaterEqual(results['''accuracy'''] , 0.75 )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
        testargs = ['''examples/by_feature/multi_process_metrics.py''']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
with tempfile.TemporaryDirectory() as tmpdir:
lowerCAmelCase__ = F"""
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(lowerCamelCase_ , '''tracking''' ) ) )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
        testargs = ['''examples/by_feature/gradient_accumulation.py''']
run_command(self._launch_args + testargs )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
        testargs = ['''examples/by_feature/local_sgd.py''']
run_command(self._launch_args + testargs )
| 90
|
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = TypeVar("""DatasetType""", Dataset, IterableDataset)
def interleave_datasets (datasets : List[DatasetType] , probabilities : Optional[List[float]] = None , seed : Optional[int] = None , info : Optional[DatasetInfo] = None , split : Optional[NamedSplit] = None , stopping_strategy : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ):
    """simple docstring"""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset
    if not datasets:
        raise ValueError('Unable to interleave an empty list of datasets.' )
    for i, dataset in enumerate(datasets ):
        if not isinstance(dataset , (Dataset, IterableDataset) ):
            if isinstance(dataset , (DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
                        'is an empty dataset dictionary.' )
                raise ValueError(
                    f"""Dataset at position {i} has at least one split: {list(dataset )}\n"""
                    f"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset ) )}']""" )
            raise ValueError(
                f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset ).__name__}.""" )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset , Dataset ) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset , dataset_type ):
            raise ValueError(
                f"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" )
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets , probabilities , seed , info=info , split=split , stopping_strategy=stopping_strategy )
    else:
        return _interleave_iterable_datasets(
            datasets , probabilities , seed , info=info , split=split , stopping_strategy=stopping_strategy )
def concatenate_datasets (dsets : List[DatasetType] , info : Optional[DatasetInfo] = None , split : Optional[NamedSplit] = None , axis : int = 0 , ):
    """simple docstring"""
    if not dsets:
        raise ValueError('Unable to concatenate an empty list of datasets.' )
    for i, dataset in enumerate(dsets ):
        if not isinstance(dataset , (Dataset, IterableDataset) ):
            if isinstance(dataset , (DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
                        'is an empty dataset dictionary.' )
                raise ValueError(
                    f"""Dataset at position {i} has at least one split: {list(dataset )}\n"""
                    f"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset ) )}']""" )
            raise ValueError(
                f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset ).__name__}.""" )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset , Dataset ) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset , dataset_type ):
            raise ValueError(
                f"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets , info=info , split=split , axis=axis )
    else:
        return _concatenate_iterable_datasets(dsets , info=info , split=split , axis=axis )
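# Usage sketch (a minimal example with map-style datasets; `Dataset.from_dict`
# is the standard constructor):
#
#   d1 = Dataset.from_dict({"a": [0, 1, 2]})
#   d2 = Dataset.from_dict({"a": [10, 11, 12]})
#   mixed = interleave_datasets([d1, d2], probabilities=[0.5, 0.5], seed=42,
#                               stopping_strategy="all_exhausted")
#   combined = concatenate_datasets([d1, d2])  # 6 rows, in order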
| 229
| 0
|
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class _snake_case ( ProcessorMixin ):
SCREAMING_SNAKE_CASE : Union[str, Any] = ['''image_processor''', '''feature_extractor''']
SCREAMING_SNAKE_CASE : Optional[int] = '''TvltImageProcessor'''
SCREAMING_SNAKE_CASE : int = '''TvltFeatureExtractor'''
    def __init__( self , image_processor , feature_extractor ):
        '''simple docstring'''
        super().__init__(image_processor=image_processor , feature_extractor=feature_extractor )
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor
    def __call__( self , images=None , audio=None , images_mixed=None , sampling_rate=None , mask_audio=False , mask_pixel=False , *args , **kwargs , ):
        '''simple docstring'''
        if images is None and audio is None:
            raise ValueError('You need to specify either an `images` or `audio` input to process.' )
        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images , mask_pixel=mask_pixel , *args , **kwargs )
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed , is_mixed=True , *args , **kwargs )
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio , *args , sampling_rate=sampling_rate , mask_audio=mask_audio , **kwargs )
        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict )
        if images is not None:
            output_dict.update(images_dict )
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict )
        return output_dict
@property
    def model_input_names( self ):
        '''simple docstring'''
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
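# Usage sketch (hypothetical variable names): combine video frames and an audio
# waveform into one dict of model inputs; each sub-processor contributes its own
# keys and the results are merged exactly as in `__call__` above.
#
#   processor = _snake_case(image_processor, feature_extractor)
#   inputs = processor(images=video_frames, audio=waveform, sampling_rate=44_100)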
| 514
|
'''simple docstring'''
import torch
def snake_case ( ) -> List[str]:
"""simple docstring"""
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(F'Successfully ran on {num_gpus} GPUs' )
if __name__ == "__main__":
    snake_case()
| 514
| 1
|
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
a : Tuple = [
'kernels/rwkv/wkv_cuda.cu',
'kernels/rwkv/wkv_op.cpp',
'kernels/deformable_detr/ms_deform_attn.h',
'kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh',
'models/graphormer/algos_graphormer.pyx',
]
def test_custom_files_are_present (transformers_path: Optional[Any] ):
    """simple docstring"""
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--check_lib', action='store_true', help='Whether to check the build or the actual package.')
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module('transformers')
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / 'build/lib/transformers'
if not test_custom_files_are_present(transformers_path):
raise ValueError('The built release does not contain the custom files. Fix this before going further!')
| 556
|
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1002)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1002)
    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
    @cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained('xlm-roberta-base')
    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = 'I was born in 92000, and this is falsé.'
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = 'Hello World!'
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            ' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
        )
        original_tokenizer_encodings = [
0,
32_93,
83,
10,
45_52,
49_89,
79_86,
6_78,
10,
59_15,
1_11,
17_94_59,
12_48_50,
4,
60_44,
2_37,
12,
6,
5,
6,
4,
67_80,
7_05,
15,
13_88,
44,
3_78,
1_01_14,
7_11,
1_52,
20,
6,
5,
2_23_76,
6_42,
12_21,
1_51_90,
3_41_53,
4_50,
56_08,
9_59,
11_19,
5_77_02,
1_36,
1_86,
47,
10_98,
2_93_67,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
60_44,
2_37,
62_84,
5_09_01,
5_28,
31,
90,
34,
9_27,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenizer_integration(self):
# fmt: off
lowerCamelCase__ = {'input_ids': [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowerCamelCase__,
            model_name='xlm-roberta-base',
            revision='d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3',
        )
| 510
| 0
|
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFMobileBertModel,
'''fill-mask''': TFMobileBertForMaskedLM,
'''question-answering''': TFMobileBertForQuestionAnswering,
'''text-classification''': TFMobileBertForSequenceClassification,
'''token-classification''': TFMobileBertForTokenClassification,
'''zero-shot''': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
        return inputs_dict
    class TFMobileBertModelTester(object):
        def __init__(
            self,
            parent,
            batch_size=13,
            seq_length=7,
            is_training=True,
            use_input_mask=True,
            use_token_type_ids=True,
            use_labels=True,
            vocab_size=99,
            hidden_size=32,
            embedding_size=32,
            num_hidden_layers=2,
            num_attention_heads=4,
            intermediate_size=37,
            hidden_act="gelu",
            hidden_dropout_prob=0.1,
            attention_probs_dropout_prob=0.1,
            max_position_embeddings=512,
            type_vocab_size=16,
            type_sequence_label_size=2,
            initializer_range=0.02,
            num_labels=3,
            num_choices=4,
            scope=None,
        ):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size

        def prepare_config_and_inputs(self):
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length])
            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)
            config = MobileBertConfig(
                vocab_size=self.vocab_size,
                hidden_size=self.hidden_size,
                num_hidden_layers=self.num_hidden_layers,
                num_attention_heads=self.num_attention_heads,
                intermediate_size=self.intermediate_size,
                hidden_act=self.hidden_act,
                hidden_dropout_prob=self.hidden_dropout_prob,
                attention_probs_dropout_prob=self.attention_probs_dropout_prob,
                max_position_embeddings=self.max_position_embeddings,
                type_vocab_size=self.type_vocab_size,
                initializer_range=self.initializer_range,
                embedding_size=self.embedding_size,
            )
            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

        def create_and_check_mobilebert_model(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertModel(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            inputs = [input_ids, input_mask]
            result = model(inputs)
            result = model(input_ids)
            self.parent.assertEqual(
                result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

        def create_and_check_mobilebert_for_masked_lm(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForMaskedLM(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

        def create_and_check_mobilebert_for_next_sequence_prediction(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForNextSentencePrediction(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_pretraining(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForPreTraining(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(
                result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_sequence_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        def create_and_check_mobilebert_for_multiple_choice(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

        def create_and_check_mobilebert_for_token_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

        def create_and_check_mobilebert_for_question_answering(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict
    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 138
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_clap": [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapAudioConfig",
"ClapConfig",
"ClapTextConfig",
],
"processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapModel",
"ClapPreTrainedModel",
"ClapTextModel",
"ClapTextModelWithProjection",
"ClapAudioModel",
"ClapAudioModelWithProjection",
]
SCREAMING_SNAKE_CASE : List[Any] = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 138
| 1
|
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s: str, old: str, new: str, occurrence: int) -> str:
    li = s.rsplit(old, occurrence)
    return new.join(li)
def count_parameters(state_dict):
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() )
def upgrade_state_dict(state_dict):
    upgrade = {}
    group_keys = ['group_1', 'group_2', 'group_3', 'group_4']
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f'{group_key}.', f'{group_key}.group.')
        if "res_path" in key:
            key = key.replace('res_path.', 'res_path.path.')
        if key.endswith('.w'):
            key = rreplace(key, '.w', '.weight', 1)
        if key.endswith('.b'):
            key = rreplace(key, '.b', '.bias', 1)
        upgrade[key] = value.float()
    return upgrade
@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()
    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to flava checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 9
|
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )
def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f'{num}/{den}')
            den += 1
        num += 1
        den = 10
    return solutions
def solution(n: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
if __name__ == "__main__":
print(solution())
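    # Illustrative check (added): the four non-trivial digit-cancelling
    # fractions are 16/64, 19/95, 26/65 and 49/98, whose product reduces to
    # 1/100, so the expected answer is 100.
    assert fraction_list(2) == ["16/64", "19/95", "26/65", "49/98"]
    assert solution() == 100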
| 9
| 1
|
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    # 1) Construct the failure array
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
if __name__ == "__main__":
# Test 1)
__magic_name__ ='''abc1abc12'''
__magic_name__ ='''alskfjaldsabc1abc1abc12k23adsfabcabc'''
__magic_name__ ='''alskfjaldsk23adsfabcabc'''
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
__magic_name__ ='''ABABX'''
__magic_name__ ='''ABABZABABYABABX'''
assert kmp(pattern, text)
# Test 3)
__magic_name__ ='''AAAB'''
__magic_name__ ='''ABAAAAAB'''
assert kmp(pattern, text)
# Test 4)
__magic_name__ ='''abcdabcy'''
__magic_name__ ='''abcxabcdabxabcdabcdabcy'''
assert kmp(pattern, text)
# Test 5)
__magic_name__ ='''aabaabaaa'''
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
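
    # Illustrative note (added): failure[i] is the length of the longest proper
    # prefix of pattern[: i + 1] that is also its suffix; it tells kmp() how far
    # to fall back after a mismatch instead of restarting from the beginning.
    assert get_failure_array("ABABX") == [0, 0, 1, 2, 0]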
| 469
|
def solution(n: int = 600851475143) -> int:
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    prime = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            prime = i
            n //= i
        i += 1
    if n > 1:
        prime = n
    return int(prime)
if __name__ == "__main__":
print(f"""{solution() = }""")
| 469
| 1
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class UpperCAmelCase__ ( snake_case ):
"""simple docstring"""
@staticmethod
@abstractmethod
def _UpperCAmelCase ( __lowerCAmelCase: ArgumentParser ) -> Optional[Any]:
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def _UpperCAmelCase ( self: str ) -> List[Any]:
'''simple docstring'''
raise NotImplementedError()
| 221
|
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    # Reverse our cipher mappings
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
if __name__ == "__main__":
import doctest
doctest.testmod()
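
    # Non-interactive example (added for illustration): with the keyword
    # "marvin", decipher() inverts encipher(), so a round trip is lossless.
    demo_map = create_cipher_map("marvin")
    assert decipher(encipher("HELLO WORLD", demo_map), demo_map) == "HELLO WORLD"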
main()
| 221
| 1
|
from __future__ import annotations
def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
    """
    Determine the remaining carrier concentration in a semiconductor from the
    other two, using the mass action law (n * p = n_i**2).
    """
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative in a semiconductor''' )
elif hole_conc < 0:
raise ValueError('''Hole concentration cannot be negative in a semiconductor''' )
elif intrinsic_conc < 0:
raise ValueError(
'''Intrinsic concentration cannot be negative in a semiconductor''' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
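
    # Worked example (added): with n = 25 and p = 100, the mass action law
    # n * p = n_i**2 gives an intrinsic concentration of sqrt(2500) = 50.
    assert carrier_concentration(
        electron_conc=25, hole_conc=100, intrinsic_conc=0
    ) == ("intrinsic_conc", 50.0)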
| 413
|
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info('`diffusers.OnnxRuntimeModel` is experimental and might change in the future.')
        self.model = model
        self.model_save_dir = kwargs.get('model_save_dir', None)
        self.latest_model_name = kwargs.get('latest_model_name', ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        if provider is None:
            logger.info('No onnxruntime provider specified, using CPUExecutionProvider')
            provider = 'CPUExecutionProvider'
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f'Provided path ({save_directory}) should be a directory, not a file')
            return
        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id: Union[str, Path],
        use_auth_token: Optional[Union[bool, str]] = None,
        revision: Optional[str] = None,
        force_download: bool = False,
        cache_dir: Optional[str] = None,
        file_name: Optional[str] = None,
        provider: Optional[str] = None,
        sess_options: Optional["ort.SessionOptions"] = None,
        **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options)
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id: Union[str, Path],
        force_download: bool = True,
        use_auth_token: Optional[str] = None,
        cache_dir: Optional[str] = None,
        **model_kwargs,
    ):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")
        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
| 1
|
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(
        self,
        sql: Union[str, "sqlalchemy.sql.Selectable"],
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir,
            features=features,
            sql=sql,
            con=con,
            **kwargs,
        )

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split='train', verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
class SqlDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f'num_proc {num_proc} must be an integer > 0.')

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        _ = self.to_sql_kwargs.pop('sql', None)
        _ = self.to_sql_kwargs.pop('con', None)
        index = self.to_sql_kwargs.pop('index', False)
        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit='ba',
                disable=not logging.is_progress_bar_enabled(),
                desc='Creating SQL from Arrow format',
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit='ba',
                    disable=not logging.is_progress_bar_enabled(),
                    desc='Creating SQL from Arrow format',
                ):
                    written += num_rows
        return written
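

# Hedged usage sketch (added; not part of the original module): writing a small
# in-memory Dataset to a SQLite table with the writer above. The table and
# database names are illustrative assumptions.
#
#   import sqlite3
#   from datasets import Dataset
#
#   ds = Dataset.from_dict({"id": [1, 2], "text": ["foo", "bar"]})
#   con = sqlite3.connect("demo.db")
#   written = SqlDatasetWriter(ds, "demo_table", con, batch_size=1000).write()
#   con.close()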
| 233
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    "A callback that registers the events that goes through."

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # its set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)
        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model,
            args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            callbacks=callbacks,
        )
    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))
        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)
    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events
    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, cb1)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
| 38
| 0
|
"""simple docstring"""
def solution(limit: int = 1_000_000) -> int:
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])
if __name__ == "__main__":
print(solution())
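    # Hand-checkable example (added): the totients of 2..8 are
    # 1, 2, 2, 4, 2, 6, 4, which sum to 21.
    assert solution(8) == 21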
| 702
|
"""simple docstring"""
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
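
    # Worked examples (added): 12 = 4 + 4 + 4 needs three squares, while
    # 13 = 4 + 9 needs only two.
    assert minimum_squares_to_represent_a_number(12) == 3
    assert minimum_squares_to_represent_a_number(13) == 2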
| 500
| 0
|
import re
def indian_phone_validator(phone: str) -> bool:
    pat = re.compile(r"""^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$""")
    if match := re.search(pat, phone):
        return match.string == phone
    return False
if __name__ == "__main__":
print(indian_phone_validator("+918827897895"))
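    # Illustrative cases (added) for the pattern above: an optional "+91" with
    # "-" or a space, an optional leading 0 or "91", then ten digits starting
    # with 7, 8 or 9.
    assert indian_phone_validator("9876543210")
    assert not indian_phone_validator("+911234567890")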
| 619
|
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)
            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )
@require_torch
def _lowerCamelCase ( self ):
UpperCamelCase__ = """hf-internal-testing/tiny-random-VideoMAEForVideoClassification"""
UpperCamelCase__ = VideoMAEFeatureExtractor(
size={"""shortest_edge""": 10} , crop_size={"""height""": 10, """width""": 10} )
UpperCamelCase__ = pipeline(
"""video-classification""" , model=__lowerCAmelCase , feature_extractor=__lowerCAmelCase , frame_sampling_rate=4 )
UpperCamelCase__ = hf_hub_download(repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""" )
UpperCamelCase__ = video_classifier(__lowerCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCAmelCase , decimals=4 ) , [{"""score""": 0.5199, """label""": """LABEL_0"""}, {"""score""": 0.4801, """label""": """LABEL_1"""}] , )
UpperCamelCase__ = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(__lowerCAmelCase , decimals=4 ) , [
[{"""score""": 0.5199, """label""": """LABEL_0"""}, {"""score""": 0.4801, """label""": """LABEL_1"""}],
[{"""score""": 0.5199, """label""": """LABEL_0"""}, {"""score""": 0.4801, """label""": """LABEL_1"""}],
] , )
@require_tf
def _lowerCamelCase ( self ):
pass
| 619
| 1
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        # setting an attribute on one instance must not leak onto a fresh instance
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
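# Illustrative note (sketch, not part of the original test file): "gelu_10" acts like
# GELU but clips activations at 10, e.g. get_activation("gelu_10")(torch.tensor([100.0]))
# returns tensor([10.]).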
| 705
|
import logging
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import arg_to_scheduler
from transformers import TrainingArguments
logger = logging.getLogger(__name__)
@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    label_smoothing: float = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."}
    )
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to SortishSamler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    adafactor: bool = field(default=False, metadata={"help": "whether to use adafactor"})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."}
    )
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."}
    )
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."}
    )
    lr_scheduler: Optional[str] = field(
        default="linear", metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"}
    )
| 277
| 0
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    r"""
    Constructs a Speech2Text feature extractor that extracts log-mel filter-bank features from raw speech.
    """

    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16_000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True

    def _extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        """Compute Kaldi-style log-mel filter-bank features for a single waveform."""
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def utterance_cmvn(
        x: np.ndarray,
        input_length: int,
        normalize_means: bool = True,
        normalize_vars: bool = True,
        padding_value: float = 0.0,
    ) -> np.ndarray:
        # utterance-level cepstral mean and variance normalization over the unpadded frames
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)

        return x

    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ):
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
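# Minimal usage sketch (hypothetical values, assuming 16 kHz mono audio as above):
#     extractor = Speech2TextFeatureExtractor()
#     batch = extractor(np.zeros(16_000, dtype=np.float32), sampling_rate=16_000,
#                       padding=True, return_tensors="np")
#     batch["input_features"]  # shape (1, num_frames, 80) after CMVN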
| 7
|
'''simple docstring'''
def infix_2_postfix(infix):
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack

        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix):
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix


if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input

    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
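# Worked example (illustrative): for the input "a+b*c" the postfix form is "abc*+",
# so infix_2_prefix("a+b*c") returns "+a*bc" (reverse the input, convert, reverse back).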
| 24
| 0
|
def hamming_distance(string1: str, string2: str) -> int:
    """Count the positions at which two equal-length strings differ."""
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")
    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
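    # Hypothetical quick check (illustrative): "karolin" and "kathrin" differ at 3 positions.
    assert hamming_distance("karolin", "kathrin") == 3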
| 467
|
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    """
    An adapter to assist with logging in multiprocess: logs only on the main process unless told otherwise.
    """

    @staticmethod
    def _should_log(main_process_only):
        """Check if the log should be performed on this process."""
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        """
        Delegates the logger call after checking whether this process should log.
        """
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)

            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name: str, log_level: str = None):
    """
    Returns a `logging.Logger` adapter for `name` that can handle multiprocessing.
    """
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
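# Usage sketch (illustrative; requires an initialized Accelerator or PartialState):
#     logger = get_logger(__name__, log_level="INFO")
#     logger.info("printed once", main_process_only=True)
#     logger.debug("printed rank by rank", main_process_only=False, in_order=True)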
| 467
| 1
|
"""simple docstring"""
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=2, seq_length=56, is_training=True, use_attention_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2,
        num_attention_heads=2, intermediate_size=7, hidden_act="gelu_new", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, num_choices=4, attention_type="block_sparse",
        use_bias=True, rescale_embeddings=False, block_size=2, num_random_blocks=3,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BigBirdConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, attention_type=self.attention_type, block_size=self.block_size, num_random_blocks=self.num_random_blocks, use_bias=self.use_bias, rescale_embeddings=self.rescale_embeddings,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )
    test_attn_probs = False
    test_mismatched_shapes = False

    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if self.test_attn_probs:
            super().test_attention_outputs()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
| 575
|
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the glue dataset, using "bert-base-cased" as the tokenizer.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator
    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs` and relevent project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
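# Typical invocation (illustrative; the script file name here is hypothetical):
#     accelerate launch tracking_example.py --with_tracking --project_dir logs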
| 575
| 1
|
'''simple docstring'''
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "tokenizer_config_file": "tokenizer_config.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
        ),
    },
    "tokenizer_config_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
        ),
    },
    "merges_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
        ),
    },
}

BPE_TOKEN_MERGES = "</w>"
BPE_TOKEN_VOCAB = "@@ "


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, given as a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/s2t-wav2vec2-large-en-de": 1024}


class Speech2Text2Tokenizer(PreTrainedTokenizer):
    """
    Constructs a Speech2Text2Tokenizer (BPE-based; decoding works without a merges file).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            do_lower_case=do_lower_case,
            **kwargs,
        )
        self.do_lower_case = do_lower_case

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}

        if merges_file is None:
            logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]
            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES

        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")

        word = word.replace(" ", BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding."
                "Make sure to provide `merges.txt` file at instantiation to enable "
                "encoding."
            )

        if self.do_lower_case:
            text = text.lower()

        text = text.split()

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))

        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        string = " ".join(tokens)
        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB))
        return string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return (vocab_file, merges_file)
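# Decoding sketch (checkpoint name taken from the pretrained map above; illustrative only):
#     tokenizer = Speech2Text2Tokenizer.from_pretrained("facebook/s2t-wav2vec2-large-en-de")
#     tokenizer.convert_tokens_to_string(["Hal@@", "lo"])  # -> "Hallo", pieces re-joined on "@@ "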
| 159
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar("T")


class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        """Build a segment tree from `arr`, combining values with the binary function `fnc`."""
        any_type: Any | T = None

        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr  # internal nodes + leaves
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res


if __name__ == "__main__":
    from functools import reduce

    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        """Check every segment against a brute-force `reduce` over the same slice."""
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
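# Worked example (0-based, inclusive indices; illustrative): before any update,
# test_array[1:4] is [10, -2, 9], so min_segment_tree.query(1, 3) == -2 and
# sum_segment_tree.query(1, 3) == 17.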
| 159
| 1
|
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata(class_info_file, repo_path="shi-labs/oneformer_demo"):
    with open(hf_hub_download(repo_path, class_info_file, repo_type="dataset"), "r") as f:
        class_info = json.load(f)
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info["name"]
        class_names.append(info["name"])
        if info["isthing"]:
            thing_ids.append(int(key))
    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata
class OneFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, size=None,
        do_resize=True, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5],
        num_labels=10, do_reduce_labels=False, ignore_index=255, repo_path="shi-labs/oneformer_demo",
        class_info_file="ade20k_panoptic.json", num_text=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {"shortest_edge": 32, "longest_edge": 1333} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file, repo_path)
        self.num_text = num_text
        self.repo_path = repo_path

        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4
        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index

    def prepare_image_processor_dict(self):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width

    def get_fake_oneformer_outputs(self):
        return OneFormerForUniversalSegmentationOutput(
            # +1 for null class
            class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1)),
            masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width)),
        )
@require_torch
@require_vision
class OneFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    feature_extraction_class = image_processing_class

    def setUp(self):
        self.image_processing_tester = OneFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processing_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "ignore_index"))
        self.assertTrue(hasattr(image_processor, "class_info_file"))
        self.assertTrue(hasattr(image_processor, "num_text"))
        self.assertTrue(hasattr(image_processor, "repo_path"))
        self.assertTrue(hasattr(image_processor, "metadata"))
        self.assertTrue(hasattr(image_processor, "do_reduce_labels"))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def comm_get_image_processor_inputs(
        self, with_segmentation_maps=False, is_instance_map=False, segmentation_type="np"
    ):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # prepare image and target
        num_labels = self.image_processing_tester.num_labels
        annotations = None
        instance_id_to_semantic_id = None
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        if with_segmentation_maps:
            high = num_labels
            if is_instance_map:
                labels_expanded = list(range(num_labels)) * 2
                instance_id_to_semantic_id = dict(enumerate(labels_expanded))
            annotations = [
                np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs
            ]
            if segmentation_type == "pil":
                annotations = [Image.fromarray(annotation) for annotation in annotations]

        inputs = image_processor(
            image_inputs,
            ["semantic"] * len(image_inputs),
            annotations,
            return_tensors="pt",
            instance_id_to_semantic_id=instance_id_to_semantic_id,
            pad_and_return_pixel_mask=True,
        )

        return inputs

    def test_init_without_params(self):
        pass

    def test_call_with_segmentation_maps(self):
        def common(is_instance_map=False, segmentation_type=None):
            inputs = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True, is_instance_map=is_instance_map, segmentation_type=segmentation_type
            )

            mask_labels = inputs["mask_labels"]
            class_labels = inputs["class_labels"]
            pixel_values = inputs["pixel_values"]
            text_inputs = inputs["text_inputs"]

            # check the batch_size
            for mask_label, class_label, text_input in zip(mask_labels, class_labels, text_inputs):
                self.assertEqual(mask_label.shape[0], class_label.shape[0])
                # this ensure padding has happened
                self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:])
                self.assertEqual(len(text_input), self.image_processing_tester.num_text)

        common()
        common(is_instance_map=True)
        common(is_instance_map=False, segmentation_type="pil")
        common(is_instance_map=True, segmentation_type="pil")
    def test_binary_mask_to_rle(self):
        fake_binary_mask = np.zeros((20, 50))
        fake_binary_mask[0, 20:] = 1  # line 0, ones from column 20 to 50
        fake_binary_mask[1, :15] = 1  # line 1, ones from column 0 to 15
        fake_binary_mask[5, :10] = 1  # line 5, ones from column 0 to 10

        rle = binary_mask_to_rle(fake_binary_mask)
        self.assertEqual(len(rle), 4)
        self.assertEqual(rle[0], 21)
        self.assertEqual(rle[1], 45)

    def test_post_process_semantic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()

        segmentation = image_processor.post_process_semantic_segmentation(outputs)

        self.assertEqual(len(segmentation), self.image_processing_tester.batch_size)
        self.assertEqual(
            segmentation[0].shape,
            (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ),
        )

        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size)]
        segmentation = image_processor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)

        self.assertEqual(segmentation[0].shape, target_sizes[0])

    def test_post_process_instance_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )

    def test_post_process_panoptic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )
| 201
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_bridgetower': [
'BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BridgeTowerConfig',
'BridgeTowerTextConfig',
'BridgeTowerVisionConfig',
],
'processing_bridgetower': ['BridgeTowerProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_bridgetower"] = ["BridgeTowerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bridgetower"] = [
'BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST',
'BridgeTowerForContrastiveLearning',
'BridgeTowerForImageAndTextRetrieval',
'BridgeTowerForMaskedLM',
'BridgeTowerModel',
'BridgeTowerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure)
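# Why the _LazyModule indirection above matters, as a small standalone sketch (module
# paths follow the standard transformers layout and are assumed here for illustration;
# this is not part of the module itself):
#
# import importlib
# import sys
#
# bridgetower = importlib.import_module("transformers.models.bridgetower")
# # the heavy torch-backed submodule is not imported just by importing the package...
# assert "transformers.models.bridgetower.modeling_bridgetower" not in sys.modules
# _ = bridgetower.BridgeTowerModel  # ...until the first attribute access resolves it
# assert "transformers.models.bridgetower.modeling_bridgetower" in sys.modules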
| 201
| 1
|
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : Any = logging.get_logger(__name__)
snake_case_ : Union[str, Any] = {
'kakaobrain/align-base': 'https://huggingface.co/kakaobrain/align-base/resolve/main/config.json',
}
class __lowerCamelCase ( lowercase ):
lowerCamelCase__: Optional[Any] = '''align_text_model'''
def __init__( self , __snake_case=3_0_5_2_2 , __snake_case=7_6_8 , __snake_case=1_2 , __snake_case=1_2 , __snake_case=3_0_7_2 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_1_2 , __snake_case=2 , __snake_case=0.02 , __snake_case=1e-1_2 , __snake_case=0 , __snake_case="absolute" , __snake_case=True , **__snake_case , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(**__A )
UpperCAmelCase: Optional[Any] = vocab_size
UpperCAmelCase: Tuple = hidden_size
UpperCAmelCase: str = num_hidden_layers
UpperCAmelCase: str = num_attention_heads
UpperCAmelCase: Optional[Any] = hidden_act
UpperCAmelCase: List[Any] = intermediate_size
UpperCAmelCase: int = hidden_dropout_prob
UpperCAmelCase: Any = attention_probs_dropout_prob
UpperCAmelCase: List[str] = max_position_embeddings
UpperCAmelCase: Any = type_vocab_size
UpperCAmelCase: List[Any] = initializer_range
UpperCAmelCase: List[Any] = layer_norm_eps
UpperCAmelCase: int = position_embedding_type
UpperCAmelCase: int = use_cache
UpperCAmelCase: Union[str, Any] = pad_token_id
@classmethod
def A__ ( cls , __snake_case , **__snake_case ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(__A )
UpperCAmelCase , UpperCAmelCase: Optional[Any] = cls.get_config_dict(__A , **__A )
# get the text config dict if we are loading from AlignConfig
if config_dict.get("model_type" ) == "align":
UpperCAmelCase: str = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(__A , **__A )
class __lowerCamelCase ( lowercase ):
lowerCamelCase__: Optional[Any] = '''align_vision_model'''
def __init__( self , __snake_case = 3 , __snake_case = 6_0_0 , __snake_case = 2.0 , __snake_case = 3.1 , __snake_case = 8 , __snake_case = [3, 3, 5, 3, 5, 5, 3] , __snake_case = [3_2, 1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2] , __snake_case = [1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2, 3_2_0] , __snake_case = [] , __snake_case = [1, 2, 2, 2, 1, 2, 1] , __snake_case = [1, 2, 2, 3, 3, 4, 1] , __snake_case = [1, 6, 6, 6, 6, 6, 6] , __snake_case = 0.25 , __snake_case = "swish" , __snake_case = 2_5_6_0 , __snake_case = "mean" , __snake_case = 0.02 , __snake_case = 0.0_01 , __snake_case = 0.99 , __snake_case = 0.2 , **__snake_case , ) -> str:
"""simple docstring"""
super().__init__(**__A )
UpperCAmelCase: Optional[Any] = num_channels
UpperCAmelCase: str = image_size
UpperCAmelCase: str = width_coefficient
UpperCAmelCase: Any = depth_coefficient
UpperCAmelCase: List[str] = depth_divisor
UpperCAmelCase: Optional[int] = kernel_sizes
UpperCAmelCase: Union[str, Any] = in_channels
UpperCAmelCase: Optional[Any] = out_channels
UpperCAmelCase: Union[str, Any] = depthwise_padding
UpperCAmelCase: Optional[int] = strides
UpperCAmelCase: List[Any] = num_block_repeats
UpperCAmelCase: List[Any] = expand_ratios
UpperCAmelCase: Dict = squeeze_expansion_ratio
UpperCAmelCase: int = hidden_act
UpperCAmelCase: List[str] = hidden_dim
UpperCAmelCase: Union[str, Any] = pooling_type
UpperCAmelCase: Union[str, Any] = initializer_range
UpperCAmelCase: Optional[int] = batch_norm_eps
UpperCAmelCase: str = batch_norm_momentum
UpperCAmelCase: List[str] = drop_connect_rate
UpperCAmelCase: Dict = sum(__A ) * 4
@classmethod
def A__ ( cls , __snake_case , **__snake_case ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(__A )
UpperCAmelCase , UpperCAmelCase: str = cls.get_config_dict(__A , **__A )
# get the vision config dict if we are loading from AlignConfig
if config_dict.get("model_type" ) == "align":
UpperCAmelCase: Union[str, Any] = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(__A , **__A )
class __lowerCamelCase ( lowercase ):
lowerCamelCase__: Optional[int] = '''align'''
lowerCamelCase__: Union[str, Any] = True
def __init__( self , __snake_case=None , __snake_case=None , __snake_case=6_4_0 , __snake_case=1.0 , __snake_case=0.02 , **__snake_case , ) -> str:
"""simple docstring"""
super().__init__(**__A )
if text_config is None:
UpperCAmelCase: Optional[int] = {}
logger.info("text_config is None. Initializing the AlignTextConfig with default values." )
if vision_config is None:
UpperCAmelCase: List[Any] = {}
logger.info("vision_config is None. Initializing the AlignVisionConfig with default values." )
UpperCAmelCase: str = AlignTextConfig(**__A )
UpperCAmelCase: Optional[Any] = AlignVisionConfig(**__A )
UpperCAmelCase: Tuple = projection_dim
UpperCAmelCase: List[Any] = temperature_init_value
UpperCAmelCase: Any = initializer_range
@classmethod
def A__ ( cls , __snake_case , __snake_case , **__snake_case ) -> Union[str, Any]:
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__A )
def A__ ( self ) -> str:
"""simple docstring"""
UpperCAmelCase: Any = copy.deepcopy(self.__dict__ )
UpperCAmelCase: Union[str, Any] = self.text_config.to_dict()
UpperCAmelCase: str = self.vision_config.to_dict()
UpperCAmelCase: Dict = self.__class__.model_type
return output
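# The composite config above in use, as a minimal standalone sketch. The obfuscated
# classmethod taking `text_config` and `vision_config` corresponds to the public
# `AlignConfig.from_text_vision_configs` in transformers; the parameter values below
# are arbitrary illustrations, not defaults taken from this file.
#
# from transformers import AlignConfig, AlignTextConfig, AlignVisionConfig
#
# text_cfg = AlignTextConfig(hidden_size=768, num_hidden_layers=12)
# vision_cfg = AlignVisionConfig(image_size=600)
# config = AlignConfig.from_text_vision_configs(text_cfg, vision_cfg, projection_dim=640)
# # to_dict() serializes the nested sub-configs, matching the override defined above
# assert config.to_dict()["text_config"]["hidden_size"] == 768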
| 718
|
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    model_type = '''new-model'''
if is_tf_available():
    class TFNewModel(TFBertModel):
        config_class = NewModelConfig
@require_tf
class __lowerCamelCase ( unittest.TestCase ):
@slow
def A__ ( self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase: int = "bert-base-cased"
UpperCAmelCase: Optional[int] = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
UpperCAmelCase: int = TFAutoModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase: Optional[int] = "bert-base-cased"
UpperCAmelCase: Any = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
UpperCAmelCase: Tuple = TFAutoModelForPreTraining.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase: str = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
UpperCAmelCase: List[Any] = TFAutoModelForCausalLM.from_pretrained(__snake_case )
UpperCAmelCase , UpperCAmelCase: Optional[Any] = TFAutoModelForCausalLM.from_pretrained(__snake_case , output_loading_info=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def A__ ( self ) -> str:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase: List[str] = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
UpperCAmelCase: List[str] = TFAutoModelWithLMHead.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def A__ ( self ) -> str:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase: List[Any] = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
UpperCAmelCase: Union[str, Any] = TFAutoModelForMaskedLM.from_pretrained(__snake_case )
UpperCAmelCase , UpperCAmelCase: Any = TFAutoModelForMaskedLM.from_pretrained(__snake_case , output_loading_info=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def A__ ( self ) -> str:
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase: str = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
UpperCAmelCase: int = TFAutoModelForSeqaSeqLM.from_pretrained(__snake_case )
UpperCAmelCase , UpperCAmelCase: str = TFAutoModelForSeqaSeqLM.from_pretrained(__snake_case , output_loading_info=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def A__ ( self ) -> Dict:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
UpperCAmelCase: str = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
UpperCAmelCase: Dict = TFAutoModelForSequenceClassification.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
UpperCAmelCase: Union[str, Any] = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
UpperCAmelCase: List[Any] = TFAutoModelForQuestionAnswering.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
@require_tensorflow_probability
def A__ ( self ) -> Any:
"""simple docstring"""
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
UpperCAmelCase: Union[str, Any] = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
UpperCAmelCase: Any = TFAutoModelForTableQuestionAnswering.from_pretrained(__snake_case )
UpperCAmelCase , UpperCAmelCase: Optional[Any] = TFAutoModelForTableQuestionAnswering.from_pretrained(
__snake_case , output_loading_info=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def A__ ( self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase: Optional[Any] = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual(model.num_parameters() , 1_4_4_1_0 )
self.assertEqual(model.num_parameters(only_trainable=__snake_case ) , 1_4_4_1_0 )
def A__ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase: Any = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual(model.num_parameters() , 1_4_4_1_0 )
self.assertEqual(model.num_parameters(only_trainable=__snake_case ) , 1_4_4_1_0 )
def A__ ( self ) -> Dict:
"""simple docstring"""
UpperCAmelCase: int = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny" )
self.assertIsInstance(__snake_case , __snake_case )
UpperCAmelCase: int = copy.deepcopy(model.config )
UpperCAmelCase: int = ["FunnelBaseModel"]
UpperCAmelCase: Optional[Any] = TFAutoModel.from_config(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(__snake_case )
UpperCAmelCase: Dict = TFAutoModel.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
try:
AutoConfig.register("new-model" , NewModelConfig )
UpperCAmelCase: Tuple = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(ValueError ):
auto_class.register(BertConfig , TFNewModel )
auto_class.register(NewModelConfig , TFNewModel )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(ValueError ):
auto_class.register(BertConfig , TFBertModel )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCAmelCase: Dict = BertModelTester(self ).get_config()
UpperCAmelCase: str = NewModelConfig(**tiny_config.to_dict() )
UpperCAmelCase: Tuple = auto_class.from_config(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(__snake_case )
UpperCAmelCase: Dict = auto_class.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def A__ ( self ) -> Tuple:
"""simple docstring"""
with self.assertRaisesRegex(
__snake_case , "bert-base is not a local folder and is not a valid model identifier" ):
UpperCAmelCase: Union[str, Any] = TFAutoModel.from_pretrained("bert-base" )
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
with self.assertRaisesRegex(
__snake_case , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
UpperCAmelCase: int = TFAutoModel.from_pretrained(__snake_case , revision="aaaaaa" )
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
with self.assertRaisesRegex(
__snake_case , "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin" , ):
UpperCAmelCase: Union[str, Any] = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model" )
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
with self.assertRaisesRegex(__snake_case , "Use `from_pt=True` to load this model" ):
UpperCAmelCase: Any = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
def A__ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase: str = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" )
with RequestCounter() as counter:
UpperCAmelCase: List[Any] = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
UpperCAmelCase: str = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" )
with RequestCounter() as counter:
UpperCAmelCase: int = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
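# Distilled from the registration test above into a hedged, standalone sketch. Building
# a model from a default-sized NewModelConfig allocates full BERT-sized weights, so treat
# this as illustrative rather than something to run inside the suite:
#
# AutoConfig.register("new-model", NewModelConfig)  # map the type string to the config class
# TFAutoModel.register(NewModelConfig, TFNewModel)  # map the config class to a concrete model
# model = TFAutoModel.from_config(NewModelConfig())
# assert isinstance(model, TFNewModel)
# # mirror the finally-clause cleanup so repeated runs do not collide
# del CONFIG_MAPPING._extra_content["new-model"]
# del TF_MODEL_MAPPING._extra_content[NewModelConfig]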
| 166
| 0
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowerCamelCase_ ( _lowercase , unittest.TestCase ):
_lowercase : Any = ShapEPipeline
_lowercase : int = ['''prompt''']
_lowercase : List[str] = ['''prompt''']
_lowercase : Union[str, Any] = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
_lowercase : Optional[Any] = False
@property
def lowerCAmelCase_ ( self : List[str] ):
return 32
@property
def lowerCAmelCase_ ( self : Optional[int] ):
return 32
@property
def lowerCAmelCase_ ( self : Dict ):
return self.time_input_dim * 4
@property
def lowerCAmelCase_ ( self : str ):
return 8
@property
def lowerCAmelCase_ ( self : Optional[int] ):
__A : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def lowerCAmelCase_ ( self : str ):
torch.manual_seed(0 )
__A : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(__A )
@property
def lowerCAmelCase_ ( self : List[Any] ):
torch.manual_seed(0 )
__A : Optional[int] = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
__A : List[str] = PriorTransformer(**__A )
return model
@property
def lowerCAmelCase_ ( self : Dict ):
torch.manual_seed(0 )
__A : Any = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
__A : Tuple = ShapERenderer(**__A )
return model
def lowerCAmelCase_ ( self : Optional[int] ):
__A : Union[str, Any] = self.dummy_prior
__A : List[Any] = self.dummy_text_encoder
__A : Union[str, Any] = self.dummy_tokenizer
__A : Tuple = self.dummy_renderer
__A : Any = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=1024 , prediction_type="""sample""" , use_karras_sigmas=__A , clip_sample=__A , clip_sample_range=1.0 , )
__A : Tuple = {
"""prior""": prior,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def lowerCAmelCase_ ( self : List[Any] , __A : str , __A : Any=0 ):
if str(__A ).startswith("""mps""" ):
__A : int = torch.manual_seed(__A )
else:
__A : List[Any] = torch.Generator(device=__A ).manual_seed(__A )
__A : Optional[int] = {
"""prompt""": """horse""",
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def lowerCAmelCase_ ( self : Optional[int] ):
__A : str = """cpu"""
__A : Tuple = self.get_dummy_components()
__A : Optional[Any] = self.pipeline_class(**__A )
__A : List[str] = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
__A : Tuple = pipe(**self.get_dummy_inputs(__A ) )
__A : Tuple = output.images[0]
__A : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__A : List[Any] = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCAmelCase_ ( self : Dict ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowerCAmelCase_ ( self : Dict ):
__A : List[str] = torch_device == """cpu"""
__A : str = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=__A , relax_max_difference=__A , )
def lowerCAmelCase_ ( self : Dict ):
__A : List[Any] = self.get_dummy_components()
__A : List[str] = self.pipeline_class(**__A )
__A : Any = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
__A : List[Any] = 1
__A : int = 2
__A : int = self.get_dummy_inputs(__A )
for key in inputs.keys():
if key in self.batch_params:
__A : Optional[Any] = batch_size * [inputs[key]]
__A : Tuple = pipe(**__A , num_images_per_prompt=__A )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
def lowerCAmelCase_ ( self : Optional[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self : int ):
__A : Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_np_out.npy""" )
__A : Union[str, Any] = ShapEPipeline.from_pretrained("""openai/shap-e""" )
__A : Any = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
__A : int = torch.Generator(device=__A ).manual_seed(0 )
__A : str = pipe(
"""a shark""" , generator=__A , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(__A , __A )
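# The slow test above as a standalone snippet (needs a CUDA GPU and the openai/shap-e
# weights; all names and values mirror the test, nothing new is assumed):
#
# import torch
# from diffusers import ShapEPipeline
#
# pipe = ShapEPipeline.from_pretrained("openai/shap-e").to("cuda")
# generator = torch.Generator(device="cuda").manual_seed(0)
# images = pipe(
#     "a shark",
#     generator=generator,
#     guidance_scale=15.0,
#     num_inference_steps=64,
#     frame_size=64,
#     output_type="np",
# ).images
# print(images[0].shape)  # (20, 64, 64, 3): 20 rendered views of the generated 3D asset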
| 17
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
_snake_case : str = 'Salesforce/blip-image-captioning-base'
_snake_case : Union[str, Any] = (
'This is a tool that generates a description of an image. It takes an input named `image` which should be the '
'image to caption, and returns a text that contains the description in English.'
)
_snake_case : List[Any] = 'image_captioner'
_snake_case : Union[str, Any] = AutoModelForVisionaSeq
_snake_case : Dict = ['image']
_snake_case : Optional[int] = ['text']
def __init__( self : Union[str, Any] , *lowerCAmelCase__ : Optional[Any] , **lowerCAmelCase__ : Optional[int] ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['''vision'''] )
super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ )
def snake_case__ ( self : Tuple , lowerCAmelCase__ : "Image" ) -> Optional[int]:
'''simple docstring'''
return self.pre_processor(images=lowerCAmelCase__ , return_tensors='''pt''' )
def snake_case__ ( self : Any , lowerCAmelCase__ : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
return self.model.generate(**lowerCAmelCase__ )
def snake_case__ ( self : int , lowerCAmelCase__ : List[str] ) -> List[str]:
'''simple docstring'''
return self.pre_processor.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )[0].strip()
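# The encode -> forward -> decode flow of the tool above, written out as a standalone
# sketch against the same checkpoint. The Auto classes are the public transformers API
# (the obfuscated `AutoModelForVisionaSeq` import corresponds to AutoModelForVision2Seq);
# "photo.jpg" is a placeholder path.
#
# from PIL import Image
# from transformers import AutoProcessor, AutoModelForVision2Seq
#
# ckpt = "Salesforce/blip-image-captioning-base"
# processor = AutoProcessor.from_pretrained(ckpt)
# model = AutoModelForVision2Seq.from_pretrained(ckpt)
# inputs = processor(images=Image.open("photo.jpg"), return_tensors="pt")
# generated = model.generate(**inputs)
# caption = processor.batch_decode(generated, skip_special_tokens=True)[0].strip()
# print(caption)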
| 98
| 0
|
'''simple docstring'''
def logical_left_shift(number: int, shift_amount: int) -> str:
    """Return the binary string of `number` shifted left by `shift_amount`, filling with zeros."""
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")
    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    """Return the binary string of `number` shifted right, discarding the low bits."""
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")
    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    """Return the two's-complement binary string of `number` shifted right, replicating the sign bit."""
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )
    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
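# Hand-checked sanity examples for the three shifts above (-10 >> 2 == -3, which is
# 11101 in 5-bit two's complement):
assert logical_left_shift(10, 2) == "0b101000"  # append two zeros on the right
assert logical_right_shift(10, 2) == "0b10"  # drop the two lowest bits
assert arithmetic_right_shift(-10, 2) == "0b11101"  # sign bit replicated on the left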
| 708
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _lowercase ( _A , unittest.TestCase ):
_a : Optional[Any] = UnCLIPImageVariationPipeline
_a : Optional[int] = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'}
_a : Optional[Any] = IMAGE_VARIATION_BATCH_PARAMS
_a : str = [
'generator',
'return_dict',
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
_a : Any = False
@property
def lowercase__ ( self ):
return 3_2
@property
def lowercase__ ( self ):
return 3_2
@property
def lowercase__ ( self ):
return self.time_input_dim
@property
def lowercase__ ( self ):
return self.time_input_dim * 4
@property
def lowercase__ ( self ):
return 1_0_0
@property
def lowercase__ ( self ):
snake_case__ : Dict =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def lowercase__ ( self ):
torch.manual_seed(0 )
snake_case__ : int =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModelWithProjection(a )
@property
def lowercase__ ( self ):
torch.manual_seed(0 )
snake_case__ : Optional[int] =CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=3_2 , intermediate_size=3_7 , patch_size=1 , )
return CLIPVisionModelWithProjection(a )
@property
def lowercase__ ( self ):
torch.manual_seed(0 )
snake_case__ : int ={
"""clip_embeddings_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""cross_attention_dim""": self.cross_attention_dim,
}
snake_case__ : Optional[int] =UnCLIPTextProjModel(**a )
return model
@property
def lowercase__ ( self ):
torch.manual_seed(0 )
snake_case__ : List[str] ={
"""sample_size""": 3_2,
# RGB in channels
"""in_channels""": 3,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 6,
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": """identity""",
}
snake_case__ : List[str] =UNetaDConditionModel(**a )
return model
@property
def lowercase__ ( self ):
return {
"sample_size": 6_4,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def lowercase__ ( self ):
torch.manual_seed(0 )
snake_case__ : Union[str, Any] =UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def lowercase__ ( self ):
# seeded differently to get different unet than `self.dummy_super_res_first`
torch.manual_seed(1 )
snake_case__ : int =UNetaDModel(**self.dummy_super_res_kwargs )
return model
def lowercase__ ( self ):
snake_case__ : Union[str, Any] =self.dummy_decoder
snake_case__ : Any =self.dummy_text_proj
snake_case__ : Optional[int] =self.dummy_text_encoder
snake_case__ : Optional[Any] =self.dummy_tokenizer
snake_case__ : List[str] =self.dummy_super_res_first
snake_case__ : str =self.dummy_super_res_last
snake_case__ : List[Any] =UnCLIPScheduler(
variance_type="""learned_range""" , prediction_type="""epsilon""" , num_train_timesteps=1_0_0_0 , )
snake_case__ : Any =UnCLIPScheduler(
variance_type="""fixed_small_log""" , prediction_type="""epsilon""" , num_train_timesteps=1_0_0_0 , )
snake_case__ : str =CLIPImageProcessor(crop_size=3_2 , size=3_2 )
snake_case__ : List[Any] =self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def lowercase__ ( self , a , a=0 , a=True ):
snake_case__ : str =floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(a ) ).to(a )
if str(a ).startswith("""mps""" ):
snake_case__ : str =torch.manual_seed(a )
else:
snake_case__ : List[Any] =torch.Generator(device=a ).manual_seed(a )
if pil_image:
snake_case__ : Optional[int] =input_image * 0.5 + 0.5
snake_case__ : Tuple =input_image.clamp(0 , 1 )
snake_case__ : Union[str, Any] =input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
snake_case__ : Tuple =DiffusionPipeline.numpy_to_pil(a )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def lowercase__ ( self ):
snake_case__ : Union[str, Any] ="""cpu"""
snake_case__ : Dict =self.get_dummy_components()
snake_case__ : Tuple =self.pipeline_class(**a )
snake_case__ : Optional[Any] =pipe.to(a )
pipe.set_progress_bar_config(disable=a )
snake_case__ : Optional[int] =self.get_dummy_inputs(a , pil_image=a )
snake_case__ : List[str] =pipe(**a )
snake_case__ : List[str] =output.images
snake_case__ : Any =self.get_dummy_inputs(a , pil_image=a )
snake_case__ : List[Any] =pipe(
**a , return_dict=a , )[0]
snake_case__ : Union[str, Any] =image[0, -3:, -3:, -1]
snake_case__ : Optional[int] =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
snake_case__ : Optional[Any] =np.array(
[
0.9997,
0.0002,
0.9997,
0.9997,
0.9969,
0.0023,
0.9997,
0.9969,
0.9970,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase__ ( self ):
snake_case__ : Dict ="""cpu"""
snake_case__ : int =self.get_dummy_components()
snake_case__ : Any =self.pipeline_class(**a )
snake_case__ : Tuple =pipe.to(a )
pipe.set_progress_bar_config(disable=a )
snake_case__ : int =self.get_dummy_inputs(a , pil_image=a )
snake_case__ : Any =pipe(**a )
snake_case__ : Optional[int] =output.images
snake_case__ : int =self.get_dummy_inputs(a , pil_image=a )
snake_case__ : List[str] =pipe(
**a , return_dict=a , )[0]
snake_case__ : List[str] =image[0, -3:, -3:, -1]
snake_case__ : List[Any] =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
snake_case__ : str =np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase__ ( self ):
snake_case__ : List[str] ="""cpu"""
snake_case__ : Dict =self.get_dummy_components()
snake_case__ : Tuple =self.pipeline_class(**a )
snake_case__ : List[Any] =pipe.to(a )
pipe.set_progress_bar_config(disable=a )
snake_case__ : Tuple =self.get_dummy_inputs(a , pil_image=a )
snake_case__ : Any =[
pipeline_inputs["""image"""],
pipeline_inputs["""image"""],
]
snake_case__ : Tuple =pipe(**a )
snake_case__ : str =output.images
snake_case__ : Tuple =self.get_dummy_inputs(a , pil_image=a )
snake_case__ : str =[
tuple_pipeline_inputs["""image"""],
tuple_pipeline_inputs["""image"""],
]
snake_case__ : Union[str, Any] =pipe(
**a , return_dict=a , )[0]
snake_case__ : List[Any] =image[0, -3:, -3:, -1]
snake_case__ : Tuple =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 6_4, 6_4, 3)
snake_case__ : int =np.array(
[
0.9997,
0.9989,
0.0008,
0.0021,
0.9960,
0.0018,
0.0014,
0.0002,
0.9933,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase__ ( self ):
snake_case__ : Optional[int] =torch.device("""cpu""" )
class _lowercase :
_a : Tuple = 1
snake_case__ : Optional[Any] =self.get_dummy_components()
snake_case__ : List[str] =self.pipeline_class(**a )
snake_case__ : int =pipe.to(a )
pipe.set_progress_bar_config(disable=a )
snake_case__ : List[Any] =torch.Generator(device=a ).manual_seed(0 )
snake_case__ : List[Any] =pipe.decoder.dtype
snake_case__ : str =1
snake_case__ : Dict =(
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
snake_case__ : List[Any] =pipe.prepare_latents(
a , dtype=a , device=a , generator=a , latents=a , scheduler=DummyScheduler() )
snake_case__ : Optional[int] =(
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
snake_case__ : Dict =pipe.prepare_latents(
a , dtype=a , device=a , generator=a , latents=a , scheduler=DummyScheduler() )
snake_case__ : Union[str, Any] =self.get_dummy_inputs(a , pil_image=a )
snake_case__ : List[str] =pipe(
**a , decoder_latents=a , super_res_latents=a ).images
snake_case__ : Any =self.get_dummy_inputs(a , pil_image=a )
# Don't pass image, instead pass embedding
snake_case__ : Optional[Any] =pipeline_inputs.pop("""image""" )
snake_case__ : Optional[int] =pipe.image_encoder(a ).image_embeds
snake_case__ : Dict =pipe(
**a , decoder_latents=a , super_res_latents=a , image_embeddings=a , ).images
# make sure passing image embeddings manually is identical
assert np.abs(img_out_a - img_out_a ).max() < 1e-4
@skip_mps
def lowercase__ ( self ):
snake_case__ : List[str] =torch_device == """cpu"""
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
snake_case__ : Tuple =1e-2
self._test_attention_slicing_forward_pass(
test_max_difference=a , expected_max_diff=a )
@skip_mps
def lowercase__ ( self ):
snake_case__ : List[Any] =torch_device == """cpu"""
snake_case__ : List[Any] =True
snake_case__ : List[Any] =[
"""decoder_num_inference_steps""",
"""super_res_num_inference_steps""",
]
self._test_inference_batch_single_identical(
test_max_difference=a , relax_max_difference=a , additional_params_copy_to_batched_inputs=a , )
def lowercase__ ( self ):
snake_case__ : Dict =[
"""decoder_num_inference_steps""",
"""super_res_num_inference_steps""",
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
snake_case__ : List[Any] =[2, 3]
self._test_inference_batch_consistent(
batch_sizes=a , additional_params_copy_to_batched_inputs=a , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=a )
@skip_mps
def lowercase__ ( self ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def lowercase__ ( self ):
return super().test_save_load_local()
@skip_mps
def lowercase__ ( self ):
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
def lowercase__ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self ):
snake_case__ : str =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png""" )
snake_case__ : Union[str, Any] =load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/unclip/karlo_v1_alpha_cat_variation_fp16.npy""" )
snake_case__ : List[Any] =UnCLIPImageVariationPipeline.from_pretrained(
"""kakaobrain/karlo-v1-alpha-image-variations""" , torch_dtype=torch.floataa )
snake_case__ : Optional[int] =pipeline.to(a )
pipeline.set_progress_bar_config(disable=a )
snake_case__ : Optional[Any] =torch.Generator(device="""cpu""" ).manual_seed(0 )
snake_case__ : Tuple =pipeline(
a , generator=a , output_type="""np""" , )
snake_case__ : Union[str, Any] =output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
assert_mean_pixel_difference(a , a , 1_5 )
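# The same slow path as a standalone snippet (a GPU and the karlo weights are required;
# every name and value below is taken from the test above):
#
# import torch
# from diffusers import UnCLIPImageVariationPipeline
# from diffusers.utils import load_image
#
# pipeline = UnCLIPImageVariationPipeline.from_pretrained(
#     "kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16
# ).to("cuda")
# input_image = load_image(
#     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png"
# )
# generator = torch.Generator(device="cpu").manual_seed(0)
# image = pipeline(input_image, generator=generator, output_type="np").images[0]
# print(image.shape)  # (256, 256, 3)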
| 448
| 0
|
"""simple docstring"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_lowerCAmelCase :Union[str, Any] = logging.getLogger(__name__)
_lowerCAmelCase :str = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
_lowerCAmelCase :int = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _UpperCAmelCase :
'''simple docstring'''
a__ =field(
default=__lowercase ,metadata={
'''help''': (
'''The model checkpoint for weights initialization. Leave None if you want to train a model from'''
''' scratch.'''
)
} ,)
a__ =field(
default=__lowercase ,metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(__lowercase )} ,)
a__ =field(
default=__lowercase ,metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
a__ =field(
default=__lowercase ,metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
a__ =field(
default=__lowercase ,metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} ,)
@dataclass
class _UpperCAmelCase :
'''simple docstring'''
a__ =field(
default=__lowercase ,metadata={'''help''': '''The input training data file (a text file).'''} )
a__ =field(
default=__lowercase ,metadata={
'''help''': (
'''The input training data files (multiple files in glob format). '''
'''Very often splitting large files into smaller files can prevent the tokenizer from going out of memory'''
)
} ,)
a__ =field(
default=__lowercase ,metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} ,)
a__ =field(
default=__lowercase ,metadata={'''help''': '''An optional input train ref data file for whole word mask in Chinese.'''} ,)
a__ =field(
default=__lowercase ,metadata={'''help''': '''An optional input eval ref data file for whole word mask in Chinese.'''} ,)
a__ =field(
default=__lowercase ,metadata={'''help''': '''Whether distinct lines of text in the dataset are to be handled as distinct sequences.'''} ,)
a__ =field(
default=__lowercase ,metadata={'''help''': '''Train with masked-language modeling loss instead of language modeling.'''} )
a__ =field(default=__lowercase ,metadata={'''help''': '''Whether or not to use whole word mask.'''} )
a__ =field(
default=0.15 ,metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
a__ =field(
default=1 / 6 ,metadata={
'''help''': (
'''Ratio of length of a span of masked tokens to surrounding context length for permutation language'''
''' modeling.'''
)
} ,)
a__ =field(
default=5 ,metadata={'''help''': '''Maximum length of a span of masked tokens for permutation language modeling.'''} )
a__ =field(
default=-1 ,metadata={
'''help''': (
'''Optional input sequence length after tokenization.'''
'''The training dataset will be truncated in block of this size for training.'''
'''Default to the model max input length for single sentence inputs (take into account special tokens).'''
)
} ,)
a__ =field(
default=__lowercase ,metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def lowerCamelCase_ (UpperCamelCase__ : DataTrainingArguments , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[str] = None , ):
def _dataset(UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError('''You need to set whole word masking and mlm to True for Chinese Whole Word Mask''' )
return LineByLineWithRefDataset(
tokenizer=UpperCamelCase__ , file_path=UpperCamelCase__ , block_size=args.block_size , ref_path=UpperCamelCase__ , )
return LineByLineTextDataset(tokenizer=UpperCamelCase__ , file_path=UpperCamelCase__ , block_size=args.block_size )
else:
return TextDataset(
tokenizer=UpperCamelCase__ , file_path=UpperCamelCase__ , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=UpperCamelCase__ , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(UpperCamelCase__ ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def lowerCamelCase_ ():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCAmelCase : List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_UpperCAmelCase : Optional[Any] = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'''Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '''
'''or remove the --do_eval argument.''' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , UpperCamelCase__ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
_UpperCAmelCase : str = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
_UpperCAmelCase : Tuple = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
_UpperCAmelCase : Optional[int] = CONFIG_MAPPING[model_args.model_type]()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.tokenizer_name:
_UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
_UpperCAmelCase : Tuple = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'''
''' script, save it, and load it from here, using --tokenizer_name''' )
if model_args.model_name_or_path:
_UpperCAmelCase : Tuple = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , )
else:
logger.info('''Training new model from scratch''' )
_UpperCAmelCase : Dict = AutoModelWithLMHead.from_config(UpperCamelCase__ )
model.resize_token_embeddings(len(UpperCamelCase__ ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'''BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'''
'''--mlm flag (masked language modeling).''' )
if data_args.block_size <= 0:
_UpperCAmelCase : Dict = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
_UpperCAmelCase : List[Any] = min(data_args.block_size , tokenizer.max_len )
# Get datasets
_UpperCAmelCase : Tuple = (
get_dataset(UpperCamelCase__ , tokenizer=UpperCamelCase__ , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
_UpperCAmelCase : Optional[Any] = (
get_dataset(UpperCamelCase__ , tokenizer=UpperCamelCase__ , evaluate=UpperCamelCase__ , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
_UpperCAmelCase : Optional[int] = DataCollatorForPermutationLanguageModeling(
tokenizer=UpperCamelCase__ , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
_UpperCAmelCase : int = DataCollatorForWholeWordMask(
tokenizer=UpperCamelCase__ , mlm_probability=data_args.mlm_probability )
else:
_UpperCAmelCase : Optional[Any] = DataCollatorForLanguageModeling(
tokenizer=UpperCamelCase__ , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
_UpperCAmelCase : List[Any] = Trainer(
model=UpperCamelCase__ , args=UpperCamelCase__ , data_collator=UpperCamelCase__ , train_dataset=UpperCamelCase__ , eval_dataset=UpperCamelCase__ , prediction_loss_only=UpperCamelCase__ , )
# Training
if training_args.do_train:
_UpperCAmelCase : Any = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=UpperCamelCase__ )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_UpperCAmelCase : Any = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
_UpperCAmelCase : Dict = trainer.evaluate()
_UpperCAmelCase : List[Any] = math.exp(eval_output['''eval_loss'''] )
_UpperCAmelCase : Any = {"""perplexity""": perplexity}
_UpperCAmelCase : List[Any] = os.path.join(training_args.output_dir , '''eval_results_lm.txt''' )
if trainer.is_world_master():
with open(UpperCamelCase__ , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''' , UpperCamelCase__ , str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
results.update(UpperCamelCase__ )
return results
def lowerCamelCase_ (UpperCamelCase__ : Optional[int] ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
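# Typical command lines for this script, given the dataclasses above. The file name and
# all paths are placeholders; the flag names follow the upstream run_language_modeling
# example, which the obfuscated dataclass fields mirror. --mlm is mandatory for
# BERT/RoBERTa-style checkpoints per the explicit check in main().
#
#   python run_language_modeling.py \
#       --model_name_or_path gpt2 \
#       --train_data_file train.txt --do_train \
#       --eval_data_file eval.txt --do_eval \
#       --output_dir ./clm-finetune
#
#   python run_language_modeling.py \
#       --model_name_or_path roberta-base --mlm --whole_word_mask \
#       --train_data_file train.txt --do_train \
#       --output_dir ./mlm-finetune --overwrite_output_dir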
| 506
|
"""simple docstring"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__lowerCamelCase :Union[str, Any] = logging.getLogger(__name__)
__lowerCamelCase :str = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
__lowerCamelCase :int = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class A__ :
"""simple docstring"""
snake_case__ : Optional[str] =field(
default=__lowercase , metadata={
'''help''': (
'''The model checkpoint for weights initialization. Leave None if you want to train a model from'''
''' scratch.'''
)
} , )
snake_case__ : Optional[str] =field(
default=__lowercase , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(__lowercase)} , )
snake_case__ : Optional[str] =field(
default=__lowercase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''})
snake_case__ : Optional[str] =field(
default=__lowercase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''})
snake_case__ : Optional[str] =field(
default=__lowercase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class A__ :
"""simple docstring"""
snake_case__ : Optional[str] =field(
default=__lowercase , metadata={'''help''': '''The input training data file (a text file).'''})
snake_case__ : Optional[str] =field(
default=__lowercase , metadata={
'''help''': (
'''The input training data files (multiple files in glob format). '''
'''Very often splitting large files into smaller files can prevent the tokenizer from going out of memory'''
)
} , )
snake_case__ : Optional[str] =field(
default=__lowercase , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
snake_case__ : Optional[str] =field(
default=__lowercase , metadata={'''help''': '''An optional input train ref data file for whole word mask in Chinese.'''} , )
snake_case__ : Optional[str] =field(
default=__lowercase , metadata={'''help''': '''An optional input eval ref data file for whole word mask in Chinese.'''} , )
snake_case__ : bool =field(
default=__lowercase , metadata={'''help''': '''Whether distinct lines of text in the dataset are to be handled as distinct sequences.'''} , )
snake_case__ : bool =field(
default=__lowercase , metadata={'''help''': '''Train with masked-language modeling loss instead of language modeling.'''})
snake_case__ : bool =field(default=__lowercase , metadata={'''help''': '''Whether or not to use whole word mask.'''})
snake_case__ : float =field(
default=0.15 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''})
snake_case__ : float =field(
default=1 / 6 , metadata={
'''help''': (
'''Ratio of length of a span of masked tokens to surrounding context length for permutation language'''
''' modeling.'''
)
} , )
snake_case__ : int =field(
default=5 , metadata={'''help''': '''Maximum length of a span of masked tokens for permutation language modeling.'''})
snake_case__ : int =field(
default=-1 , metadata={
'''help''': (
'''Optional input sequence length after tokenization.'''
'''The training dataset will be truncated in block of this size for training.'''
'''Default to the model max input length for single sentence inputs (take into account special tokens).'''
)
} , )
snake_case__ : bool =field(
default=__lowercase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''})
def snake_case ( UpperCamelCase__ : DataTrainingArguments , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[str] = None , ) -> Optional[int]:
def _dataset(UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError("""You need to set world whole masking and mlm to True for Chinese Whole Word Mask""" )
return LineByLineWithRefDataset(
tokenizer=UpperCamelCase__ , file_path=UpperCamelCase__ , block_size=args.block_size , ref_path=UpperCamelCase__ , )
return LineByLineTextDataset(tokenizer=UpperCamelCase__ , file_path=UpperCamelCase__ , block_size=args.block_size )
else:
return TextDataset(
tokenizer=UpperCamelCase__ , file_path=UpperCamelCase__ , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=UpperCamelCase__ , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(UpperCamelCase__ ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def snake_case ( ) -> Any:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"""Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file """
"""or remove the --do_eval argument.""" )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , UpperCamelCase__ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
            " script, save it, and load it from here, using --tokenizer_name"
        )
    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelWithLMHead.from_config(config)
    model.resize_token_embeddings(len(tokenizer))
    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            "BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
            " --mlm flag (masked language modeling)."
        )
    if data_args.block_size <= 0:
        # Our input block size will be the max possible for the model
        data_args.block_size = tokenizer.max_len
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)
    # Get datasets
    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer,
            plm_probability=data_args.plm_probability,
            max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability
            )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
            )
    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )
    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output["eval_loss"])
        result = {"perplexity": perplexity}
        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info("  %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))
        results.update(result)
    return results
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
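# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original script): a hypothetical
# invocation with placeholder paths; the flags map onto the dataclass fields
# and TrainingArguments used above.
#
#   python run_language_modeling.py \
#       --model_name_or_path gpt2 \
#       --train_data_file train.txt \
#       --eval_data_file eval.txt \
#       --do_train --do_eval \
#       --output_dir ./lm_output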
| 222
| 0
|
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    """Create all combinations of k numbers chosen from 1..n."""
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
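    # Editor's cross-check (illustrative): the recursive generator above should
    # agree with itertools.combinations on the same n and k.
    from itertools import combinations

    assert total_list == [list(c) for c in combinations(range(1, n + 1), k)]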
| 700
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""yjernite/retribert-base-uncased""": (
"""https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""yjernite/retribert-base-uncased""": (
"""https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""yjernite/retribert-base-uncased""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""yjernite/retribert-base-uncased""": {"""do_lower_case""": True},
}
class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
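if __name__ == "__main__":
    # Editor's sketch: the segment-id layout produced by
    # create_token_type_ids_from_sequences above, written out with plain lists.
    # The ids are illustrative placeholders, not real vocabulary ids.
    ids_a = [7, 8, 9]  # first sequence: [CLS] A [SEP] -> all 0s
    ids_b = [10, 11]  # second sequence: B [SEP] -> all 1s
    token_type_ids = [0] * (1 + len(ids_a) + 1) + [1] * (len(ids_b) + 1)
    assert token_type_ids == [0, 0, 0, 0, 0, 1, 1, 1]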
| 563
| 0
|
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)
def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary."""
    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))
    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))
        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
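if __name__ == "__main__":
    # Editor's sketch of the kernel convention handled above: PyTorch stores conv
    # weights as (out, in, h, w) while Flax expects (h, w, in, out), hence the
    # transpose(2, 3, 1, 0). numpy stands in for a real checkpoint tensor.
    import numpy as np

    pt_kernel = np.zeros((8, 3, 5, 5))  # (out_channels, in_channels, h, w)
    flax_kernel = pt_kernel.transpose(2, 3, 1, 0)
    assert flax_kernel.shape == (5, 5, 3, 8)  # (h, w, in_channels, out_channels)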
| 398
|
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()
        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))
        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)
        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)
        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]
        batch_size = prompt_embeds.shape[0]
        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds
        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)
        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)
        return text_encoder_hidden_states, additive_clip_time_embeddings
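if __name__ == "__main__":
    # Editor's sketch of the classifier-free-guidance step in forward() above:
    # one learned embedding is broadcast over the batch and prepended along the
    # batch dimension. Shapes are illustrative only.
    image_embeds = torch.randn(2, 768)
    learned = torch.zeros(768).unsqueeze(0).expand(image_embeds.shape[0], -1)
    stacked = torch.cat([learned, image_embeds], dim=0)
    assert stacked.shape == (4, 768)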
| 201
| 0
|
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens):
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code):
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator, jaccard_threshold):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1, code2):
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)


_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(dataset, jaccard_threshold=0.85):
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]
    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")
    return ds_filter, duplicate_clusters
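if __name__ == "__main__":
    # Editor's sketch: the token-level Jaccard similarity used above, on two tiny
    # illustrative snippets (not taken from any dataset).
    code_a = "def add(a, b):\n    return a + b"
    code_b = "def add(x, y):\n    return x + y"
    # shared tokens {"def", "add", "return"} out of 7 distinct tokens -> 3/7
    print(jaccard_similarity(code_a, code_b))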
| 60
|
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
KT = TypeVar("KT")
VT = TypeVar("VT")
class Node(Generic[KT, VT]):
    def __init__(self, key: KT | str = "root", value: VT | None = None):
        self.key = key
        self.value = value
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self) -> str:
        return f"Node({self.key}: {self.value})"

    @property
    def level(self) -> int:
        return len(self.forward)


class SkipList(Generic[KT, VT]):
    def __init__(self, p: float = 0.5, max_level: int = 16):
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level

    def __str__(self) -> str:
        items = list(self)
        if len(items) == 0:
            return f"SkipList(level={self.level})"
        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4
        node = self.head
        lines = []
        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, "-") + "* " * len(forwards))
        lines.append(" " * label_size + "| " * len(forwards))
        while len(node.forward) != 0:
            node = node.forward[0]
            lines.append(
                f"[{node.key}]".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards)
            )
            lines.append(" " * label_size + "| " * len(forwards))
            forwards = node.forward
        lines.append("None".ljust(label_size) + "* " * len(forwards))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)

    def __iter__(self):
        node = self.head
        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self) -> int:
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level

    def _locate_node(self, key):
        update_vector = []
        node = self.head
        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)
        update_vector.reverse()  # Note that we were inserting values in reverse order.
        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector

    def delete(self, key: KT):
        node, update_vector = self._locate_node(key)
        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key: KT, value: VT):
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()
            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level
            new_node = Node(key, value)
            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])
                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node

    def find(self, key: KT):
        node, _ = self._locate_node(key)
        if node is not None:
            return node.value
        return None
def test_insert():
    skip_list = SkipList()
    skip_list.insert("Key1", 3)
    skip_list.insert("Key2", 12)
    skip_list.insert("Key3", 41)
    skip_list.insert("Key4", -19)
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19


def test_insert_overrides_existing_value():
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)
    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)
    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10


def test_searching_empty_list_returns_none():
    skip_list = SkipList()
    assert skip_list.find("Some key") is None


def test_search():
    skip_list = SkipList()
    skip_list.insert("Key2", 20)
    assert skip_list.find("Key2") == 20
    skip_list.insert("Some Key", 10)
    skip_list.insert("Key2", 8)
    skip_list.insert("V", 13)
    assert skip_list.find("Y") is None
    assert skip_list.find("Key2") == 8
    assert skip_list.find("Some Key") == 10
    assert skip_list.find("V") == 13


def test_deleting_item_from_empty_list_do_nothing():
    skip_list = SkipList()
    skip_list.delete("Some key")
    assert len(skip_list.head.forward) == 0


def test_deleted_items_are_not_founded_by_find_method():
    skip_list = SkipList()
    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)
    skip_list.delete("V")
    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("Key2") is None


def test_delete_removes_only_given_key():
    skip_list = SkipList()
    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)
    skip_list.delete("V")
    assert skip_list.find("V") is None
    assert skip_list.find("X") == 14
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15
    skip_list.delete("X")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15
    skip_list.delete("Key1")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") == 15
    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") is None


def test_delete_doesnt_leave_dead_nodes():
    skip_list = SkipList()
    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 142)
    skip_list.insert("Key2", 15)
    skip_list.delete("X")

    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4


def test_iter_always_yields_sorted_values():
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))


def pytests():
    for _ in range(100):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()
        test_searching_empty_list_returns_none()
        test_search()
        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()
        test_iter_always_yields_sorted_values()


def main():
    skip_list = SkipList()
    skip_list.insert(2, "2")
    skip_list.insert(4, "4")
    skip_list.insert(6, "4")
    skip_list.insert(4, "5")
    skip_list.insert(8, "4")
    skip_list.insert(9, "4")
    skip_list.delete(4)
    print(skip_list)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
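    # Editor's usage sketch (illustrative): keys just need to be mutually comparable.
    demo = SkipList()
    for key, value in [(3, "c"), (1, "a"), (2, "b")]:
        demo.insert(key, value)
    assert list(demo) == [1, 2, 3]  # iteration yields keys in sorted order
    assert demo.find(2) == "b"
    demo.delete(2)
    assert demo.find(2) is None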
| 60
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
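if __name__ == "__main__":
    # Editor's sketch of the rescale + normalize arithmetic applied above; the
    # mean/std values are assumed to match OPENAI_CLIP_MEAN / OPENAI_CLIP_STD.
    pixel = np.array([127.5, 127.5, 127.5])  # one RGB pixel on the 0-255 scale
    mean = np.array([0.48145466, 0.4578275, 0.40821073])
    std = np.array([0.26862954, 0.26130258, 0.27577711])
    print((pixel / 255.0 - mean) / std)  # value fed to the model for this pixel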
| 61
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")
    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)
if __name__ == "__main__":
main()
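# Editor's note: these checks are meant to run under several processes, e.g. a
# hypothetical launch:  accelerate launch --num_processes 2 test_ops.py
# On a single process, gather()/broadcast() reduce to identity operations.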
| 454
| 0
|
def binary_insertion_sort(collection):
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
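    # Editor's cross-check (illustrative): the result should match sorted().
    import random

    sample = [random.randint(-100, 100) for _ in range(50)]
    assert binary_insertion_sort(sample[:]) == sorted(sample)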
| 583
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
"configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
"tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
"GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoXJapaneseForCausalLM",
"GPTNeoXJapaneseLayer",
"GPTNeoXJapaneseModel",
"GPTNeoXJapanesePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
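# Editor's note: _LazyModule defers the heavy torch imports until an attribute
# is first accessed, in the spirit of PEP 562's module-level __getattr__, e.g.:
#
#   def __getattr__(name):
#       import importlib
#       return getattr(importlib.import_module(".modeling_gpt_neox_japanese", __name__), name)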
| 583
| 1
|
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list, length: int) -> int:
    """Count the reversible numbers of the given length (Project Euler 145 helper)."""
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1
    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result
    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length
            )
    return result


def solution(max_power: int = 9) -> int:
    """Sum the counts of reversible numbers for all lengths up to max_power digits."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
if __name__ == "__main__":
print(f"""{solution() = }""")
| 43
|
def get_highest_set_bit_position(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
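    # Editor's cross-check (illustrative): for non-negative ints this matches
    # the builtin int.bit_length().
    for n in (0, 1, 5, 1024):
        assert get_highest_set_bit_position(n) == n.bit_length()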
| 397
| 0
|
'''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json',
},
'merges_file': {
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt',
},
'tokenizer_file': {
'Salesforce/codegen-350M-mono': (
'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "Salesforce/codegen-350M-mono": 2048,
}
class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currently GPT2's fast tokenizer does NOT support adding a BOS token. "
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005,"
                " so that the fast tokenizer works correctly."
            )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def decode(
        self,
        token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        truncate_before_pattern: Optional[List[str]] = None,
        **kwargs,
    ) -> str:
        decoded_text = super().decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)
        return decoded_text

    def truncate(self, completion, truncate_before_pattern):
        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]
        prints = list(re.finditer("^print", completion, re.MULTILINE))
        if len(prints) > 1:
            completion = completion[: prints[1].start()]
        defs = list(re.finditer("^def", completion, re.MULTILINE))
        if len(defs) > 1:
            completion = completion[: defs[1].start()]
        start_pos = 0
        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]
        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
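if __name__ == "__main__":
    # Editor's sketch of the truncation heuristic above: keep only the first
    # top-level "def" block of a sampled completion. Input text is illustrative.
    sample = "def f():\n    return 1\n\ndef g():\n    return 2\n"
    defs = list(re.finditer("^def", sample, re.MULTILINE))
    if len(defs) > 1:
        sample = sample[: defs[1].start()]
    print(sample)  # -> only the body of f() survives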
| 159
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetImg2ImgPipeline,
    KandinskyV22PriorEmb2EmbPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetImg2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler = DDIMScheduler(**ddim_config)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_controlnet_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_controlnet_img2img( self ):
"""simple docstring"""
lowerCAmelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy" )
lowerCAmelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
lowerCAmelCase = init_image.resize((5_1_2, 5_1_2) )
lowerCAmelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/hint_image_cat.png" )
lowerCAmelCase = torch.from_numpy(np.array(SCREAMING_SNAKE_CASE ) ).float() / 2_5_5.0
lowerCAmelCase = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
lowerCAmelCase = "A robot, 4k photo"
lowerCAmelCase = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa )
pipe_prior.to(SCREAMING_SNAKE_CASE )
lowerCAmelCase = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa )
lowerCAmelCase = pipeline.to(SCREAMING_SNAKE_CASE )
pipeline.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
lowerCAmelCase = torch.Generator(device="cpu" ).manual_seed(0 )
lowerCAmelCase , lowerCAmelCase = pipe_prior(
SCREAMING_SNAKE_CASE , image=SCREAMING_SNAKE_CASE , strength=0.8_5 , generator=SCREAMING_SNAKE_CASE , negative_prompt="" , ).to_tuple()
lowerCAmelCase = pipeline(
image=SCREAMING_SNAKE_CASE , image_embeds=SCREAMING_SNAKE_CASE , negative_image_embeds=SCREAMING_SNAKE_CASE , hint=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , num_inference_steps=1_0_0 , height=5_1_2 , width=5_1_2 , strength=0.5 , output_type="np" , )
lowerCAmelCase = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
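# --- illustrative example (not part of the test above) ---
# A minimal, self-contained sketch of the hint preparation used in the slow test:
# an RGB control image (H, W, C with values in [0, 255]) becomes the normalized
# NCHW float tensor the controlnet pipeline consumes. The random array stands in
# for a real depth map; `image_to_hint` is a hypothetical helper, not a diffusers API.
import numpy as np
import torch


def image_to_hint(image_array):
    hint = torch.from_numpy(image_array).float() / 255.0  # scale to [0, 1]
    return hint.permute(2, 0, 1).unsqueeze(0)  # HWC -> CHW -> NCHW


assert image_to_hint((np.random.rand(512, 512, 3) * 255).astype(np.uint8)).shape == (1, 3, 512, 512)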
| 159
| 1
|
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class __lowercase ( unittest.TestCase ):
    def setUp( self ):
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ['scripts', 'external_deps', 'test_metrics.py'] )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop( self ):
        debug_launcher(self.test_metrics.main , num_processes=1 )

    @require_cpu
    def test_metric_cpu_multi( self ):
        debug_launcher(self.test_metrics.main )

    @require_single_gpu
    def test_metric_gpu( self ):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi( self ):
        print(f"Found {torch.cuda.device_count()} devices." )
        cmd = ['torchrun', f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd , env=os.environ.copy() )
| 539
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
"tokenization_deberta": ["DebertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["DebertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deberta"] = [
"DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"DebertaForMaskedLM",
"DebertaForQuestionAnswering",
"DebertaForSequenceClassification",
"DebertaForTokenClassification",
"DebertaModel",
"DebertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deberta"] = [
"TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDebertaForMaskedLM",
"TFDebertaForQuestionAnswering",
"TFDebertaForSequenceClassification",
"TFDebertaForTokenClassification",
"TFDebertaModel",
"TFDebertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
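# --- illustrative example (not part of the module above) ---
# A rough standalone sketch of the lazy-import idea behind `_LazyModule` (this is NOT
# the real transformers implementation): attribute access triggers the import of the
# submodule that defines the attribute, so importing the package stays cheap.
import importlib
import types


class LazySketchModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_submodule = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_submodule:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._attr_to_submodule[attr], self.__name__)
        return getattr(submodule, attr)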
| 539
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {
"google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class PegasusConfig( PretrainedConfig ):
    '''simple docstring'''

    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__( self , vocab_size=50_265 , max_position_embeddings=1_024 , encoder_layers=12 , encoder_ffn_dim=4_096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4_096 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=1_024 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=0 , scale_embedding=False , pad_token_id=0 , eos_token_id=1 , forced_eos_token_id=1 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )

    @property
    def num_attention_heads( self ):
        return self.encoder_attention_heads

    @property
    def hidden_size( self ):
        return self.d_model
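# --- illustrative example (not part of the config above) ---
# A short usage sketch, assuming the standard `PegasusConfig` API from transformers:
# the `attribute_map` entries make `hidden_size` and `num_attention_heads` resolve to
# `d_model` and `encoder_attention_heads` respectively.
from transformers import PegasusConfig

example_config = PegasusConfig(d_model=512, encoder_attention_heads=8)
assert example_config.hidden_size == 512
assert example_config.num_attention_heads == 8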
| 718
|
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING = None
class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable with either `pickle` or `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using a global variable since `jaxlib.xla_extension.Device` is not serializable
        # with either `pickle` or `dill`
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str():
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using a global variable since `jaxlib.xla_extension.Device` is not serializable
        # with either `pickle` or `dill`
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
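# --- illustrative example (not part of the formatter above) ---
# A quick demonstration of the dtype branch in `_tensorize`: JAX defaults to 32-bit
# precision, so new integer arrays are int32 unless x64 mode is enabled via the same
# config flag the formatter checks. Sketch only; requires jax to be installed.
import jax
import jax.numpy as jnp

print(jnp.array([1, 2, 3]).dtype)  # int32 under the default config
jax.config.update("jax_enable_x64", True)
print(jnp.array([1, 2, 3]).dtype)  # int64 once x64 is enabled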
| 591
| 0
|
import math
class Graph:
    def __init__(self, n=0):  # a graph with nodes 0, 1, ..., n-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
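    # Floyd-Warshall relaxes every pair through every intermediate node, so it runs
    # in O(n^3) time on the n x n distance matrix. A tiny hand-checkable example:
    # 0 -> 1 costs 4 directly, but only 1 + 2 = 3 via node 2.
    tiny = Graph(3)
    tiny.add_edge(0, 1, 4)
    tiny.add_edge(0, 2, 1)
    tiny.add_edge(2, 1, 2)
    tiny.floyd_warshall()
    assert tiny.show_min(0, 1) == 3  # 0 -> 2 -> 1
    assert tiny.show_min(0, 2) == 1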
| 10
|
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
'''simple docstring'''
_a = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys() )} )
_a = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
_a = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
_a = field(
default=a__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def _lowerCAmelCase ( self : Union[str, Any] ) ->Union[str, Any]:
lowerCamelCase_ : Optional[Any] = self.task_name.lower()
class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"
class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]

    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")

                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve it.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
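# --- illustrative example (not part of the dataset class above) ---
# The constructor above is essentially a lock-guarded cache: the first process builds
# the features and saves them, while concurrent processes block on the file lock and
# then load the cached file. A minimal standalone sketch of that pattern
# (`build_features` and `cache_path` are illustrative names, not a transformers API):
import os

import torch
from filelock import FileLock


def cached_build(cache_path, build_features, overwrite=False):
    with FileLock(cache_path + ".lock"):  # only one process builds at a time
        if os.path.exists(cache_path) and not overwrite:
            return torch.load(cache_path)
        features = build_features()
        torch.save(features, cache_path)
        return features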
| 278
| 0
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : Dict = logging.get_logger(__name__)
__lowerCamelCase : Tuple = {
'''microsoft/unispeech-sat-base-100h-libri-ft''': (
'''https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'''
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig( PretrainedConfig ):
    model_type = "unispeech-sat"
def __init__( self : int,_A : Dict=32,_A : Tuple=768,_A : Optional[Any]=12,_A : Any=12,_A : List[Any]=3072,_A : Optional[Any]="gelu",_A : Dict=0.1,_A : List[str]=0.1,_A : List[Any]=0.1,_A : Dict=0.0,_A : str=0.0,_A : Union[str, Any]=0.1,_A : List[str]=0.1,_A : Optional[int]=0.02,_A : int=1E-5,_A : Any="group",_A : Tuple="gelu",_A : Union[str, Any]=(512, 512, 512, 512, 512, 512, 512),_A : str=(5, 2, 2, 2, 2, 2, 2),_A : Optional[Any]=(10, 3, 3, 3, 3, 2, 2),_A : Tuple=False,_A : int=128,_A : Tuple=16,_A : Tuple=False,_A : Tuple=True,_A : Dict=0.05,_A : Tuple=10,_A : str=2,_A : List[str]=0.0,_A : Union[str, Any]=10,_A : Dict=0,_A : Union[str, Any]=320,_A : str=2,_A : int=0.1,_A : Optional[Any]=100,_A : Dict=256,_A : Optional[int]=256,_A : Tuple=0.1,_A : List[Any]="mean",_A : Tuple=False,_A : List[Any]=False,_A : Tuple=256,_A : Union[str, Any]=(512, 512, 512, 512, 1500),_A : List[str]=(5, 3, 3, 1, 1),_A : int=(1, 2, 3, 1, 1),_A : List[str]=512,_A : Dict=0,_A : Tuple=1,_A : int=2,_A : Optional[int]=504,**_A : int,):
"""simple docstring"""
super().__init__(**_A,pad_token_id=_A,bos_token_id=_A,eos_token_id=_A )
SCREAMING_SNAKE_CASE_ : str = hidden_size
SCREAMING_SNAKE_CASE_ : List[str] = feat_extract_norm
SCREAMING_SNAKE_CASE_ : List[str] = feat_extract_activation
SCREAMING_SNAKE_CASE_ : List[Any] = list(_A )
SCREAMING_SNAKE_CASE_ : List[Any] = list(_A )
SCREAMING_SNAKE_CASE_ : str = list(_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = conv_bias
SCREAMING_SNAKE_CASE_ : List[Any] = num_conv_pos_embeddings
SCREAMING_SNAKE_CASE_ : Tuple = num_conv_pos_embedding_groups
SCREAMING_SNAKE_CASE_ : Union[str, Any] = len(self.conv_dim )
SCREAMING_SNAKE_CASE_ : Any = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Tuple = intermediate_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE_ : Any = num_attention_heads
SCREAMING_SNAKE_CASE_ : Any = hidden_dropout
SCREAMING_SNAKE_CASE_ : Union[str, Any] = attention_dropout
SCREAMING_SNAKE_CASE_ : Tuple = activation_dropout
SCREAMING_SNAKE_CASE_ : Any = feat_proj_dropout
SCREAMING_SNAKE_CASE_ : Optional[int] = final_dropout
SCREAMING_SNAKE_CASE_ : Optional[int] = layerdrop
SCREAMING_SNAKE_CASE_ : Optional[int] = layer_norm_eps
SCREAMING_SNAKE_CASE_ : List[Any] = initializer_range
SCREAMING_SNAKE_CASE_ : Union[str, Any] = vocab_size
SCREAMING_SNAKE_CASE_ : Any = num_clusters
SCREAMING_SNAKE_CASE_ : List[str] = do_stable_layer_norm
SCREAMING_SNAKE_CASE_ : List[str] = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
SCREAMING_SNAKE_CASE_ : Optional[int] = apply_spec_augment
SCREAMING_SNAKE_CASE_ : Any = mask_time_prob
SCREAMING_SNAKE_CASE_ : Dict = mask_time_length
SCREAMING_SNAKE_CASE_ : Union[str, Any] = mask_time_min_masks
SCREAMING_SNAKE_CASE_ : Optional[Any] = mask_feature_prob
SCREAMING_SNAKE_CASE_ : int = mask_feature_length
SCREAMING_SNAKE_CASE_ : List[str] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
SCREAMING_SNAKE_CASE_ : Any = num_codevectors_per_group
SCREAMING_SNAKE_CASE_ : Optional[int] = num_codevector_groups
SCREAMING_SNAKE_CASE_ : Tuple = contrastive_logits_temperature
SCREAMING_SNAKE_CASE_ : Optional[Any] = feat_quantizer_dropout
SCREAMING_SNAKE_CASE_ : Any = num_negatives
SCREAMING_SNAKE_CASE_ : str = codevector_dim
SCREAMING_SNAKE_CASE_ : str = proj_codevector_dim
SCREAMING_SNAKE_CASE_ : Union[str, Any] = diversity_loss_weight
# ctc loss
SCREAMING_SNAKE_CASE_ : str = ctc_loss_reduction
SCREAMING_SNAKE_CASE_ : List[Any] = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
SCREAMING_SNAKE_CASE_ : str = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
SCREAMING_SNAKE_CASE_ : Optional[Any] = list(_A )
SCREAMING_SNAKE_CASE_ : Dict = list(_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = list(_A )
SCREAMING_SNAKE_CASE_ : Any = xvector_output_dim
    @property
    def inputs_to_logits_ratio( self ):
        """simple docstring"""
        return functools.reduce(operator.mul , self.conv_stride , 1 )
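# --- illustrative example (not part of the config above) ---
# The property above multiplies the conv strides together, giving the overall
# downsampling factor of the feature extractor: how many raw audio samples collapse
# into one frame of hidden states. With the default strides that is 5 * 2**6 = 320.
import functools
import operator

default_conv_stride = (5, 2, 2, 2, 2, 2, 2)
print(functools.reduce(operator.mul, default_conv_stride, 1))  # 320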
| 316
|
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE
__lowerCamelCase : Union[str, Any] = '''config.json'''
__lowerCamelCase : Tuple = '''diffusion_pytorch_model.bin'''
__lowerCamelCase : Tuple = '''diffusion_flax_model.msgpack'''
__lowerCamelCase : Dict = '''model.onnx'''
__lowerCamelCase : Optional[Any] = '''diffusion_pytorch_model.safetensors'''
__lowerCamelCase : Tuple = '''weights.pb'''
__lowerCamelCase : int = '''https://huggingface.co'''
__lowerCamelCase : Tuple = default_cache_path
__lowerCamelCase : Optional[int] = '''diffusers_modules'''
__lowerCamelCase : Tuple = os.getenv('''HF_MODULES_CACHE''', os.path.join(hf_cache_home, '''modules'''))
__lowerCamelCase : Dict = ['''fp16''', '''non-ema''']
__lowerCamelCase : Optional[int] = '''.self_attn'''
| 316
| 1
|
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class _snake_case ( unittest.TestCase ):
    def test_accelerated_optimizer_pickling( self ):
        model = torch.nn.Linear(10 , 10 )
        optimizer = torch.optim.SGD(model.parameters() , 0.1 )
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer )
        try:
            pickle.loads(pickle.dumps(optimizer ) )
        except Exception as e:
            self.fail(F'''Accelerated optimizer pickling failed with {e}''' )
        AcceleratorState._reset_state()
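# --- illustrative example (not part of the test above) ---
# The round-trip `pickle.loads(pickle.dumps(obj))` is a generic way to assert that a
# wrapped object stays serializable; a minimal standalone version of the same check:
def assert_picklable(obj):
    return pickle.loads(pickle.dumps(obj))  # raises if obj cannot round-trip


assert_picklable({"lr": 0.1, "params": [1, 2, 3]})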
| 445
|
'''simple docstring'''
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)

name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names


def add_arguments(parser):
    """Add arguments for quantization."""
    group = parser.add_argument_group("quant_trainer arguments")
    group.add_argument("--wprec", type=int, default=8, help="weight precision")
    group.add_argument("--aprec", type=int, default=8, help="activation precision")
    group.add_argument("--quant-per-tensor", action="store_true", help="per tensor weight scaling")
    group.add_argument("--quant-disable", action="store_true", help="disable all quantizers")
    group.add_argument("--quant-disable-embeddings", action="store_true", help="disable all embeddings quantizers")
    group.add_argument("--quant-disable-keyword", type=str, nargs="+", help="disable quantizers by keyword")
    group.add_argument("--quant-disable-layer-module", type=str, help="disable quantizers by keyword under layer.")
    group.add_argument("--quant-enable-layer-module", type=str, help="enable quantizers by keyword under layer")
    group.add_argument("--calibrator", default="max", help="which quantization range calibrator to use")
    group.add_argument("--percentile", default=None, type=float, help="percentile for PercentileCalibrator")
    group.add_argument("--fuse-qkv", action="store_true", help="use the same scale factor for qkv")
    group.add_argument("--clip-gelu", metavar="N", type=float, help="clip gelu output maximum value to N")
    group.add_argument(
        "--recalibrate-weights",
        action="store_true",
        help=(
            "recalibrate weight amaxes by taking the max of the weights."
            " amaxes will be computed with the current quantization granularity (axis)."
        ),
    )


def set_default_quantizers(args):
    """Set default quantizers before creating the model."""
    if args.calibrator == "max":
        calib_method = "max"
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("Specify --percentile when using percentile calibrator")
        calib_method = "histogram"
    elif args.calibrator == "mse":
        calib_method = "histogram"
    else:
        raise ValueError(f"Invalid calibrator {args.calibrator}")

    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method)
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)


def configure_model(model, args, calib=False, eval=False):
    """Function called before the training loop."""
    logger.info("Configuring Model for Quantization")
    logger.info(f"using quantization package {pytorch_quantization.__file__}")

    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model, ["embeddings"], which="weight", _disabled=True)
        if args.quant_disable:
            set_quantizer_by_name(model, [""], _disabled=True)
        if args.quant_disable_keyword:
            set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_disable_layer_module], _disabled=True)
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_enable_layer_module], _disabled=False)
        if args.recalibrate_weights:
            recalibrate_weights(model)
        if args.fuse_qkv:
            fuse_qkv(model, args)
        if args.clip_gelu:
            clip_gelu(model, args.clip_gelu)

    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model)


def enable_calibration(model):
    """Enable calibration of all *_input_quantizer modules in model."""
    logger.info("Enabling Calibration")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f"{name:80}: {module}")


def finish_calibration(model, args):
    """Disable calibration and load amax for all *_input_quantizer modules in model."""
    logger.info("Loading calibrated amax")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                if isinstance(module._calibrator, calib.MaxCalibrator):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax("percentile", percentile=args.percentile)
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model)


def fuse_qkv(model, args):
    """Adjust quantization ranges to match an implementation where Q, K and V share one range."""

    def fusea(qq, qk, qv):
        for mod in [qq, qk, qv]:
            if not hasattr(mod, "_amax"):
                print("          WARNING: NO AMAX BUFFER")
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()

        amax = max(q, k, v)
        qq._amax.fill_(amax)
        qk._amax.fill_(amax)
        qv._amax.fill_(amax)
        logger.info(f"          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}")

    for name, mod in model.named_modules():
        if name.endswith(".attention.self"):
            logger.info(f"FUSE_QKV: {name:{name_width}}")
            fusea(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
            if args.quant_per_tensor:
                fusea(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)


def clip_gelu(model, maxval):
    """Clip activations generated by GELU to maxval when quantized."""
    for name, mod in model.named_modules():
        if name.endswith(".output.dense") and not name.endswith("attention.output.dense"):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}")


def expand_amax(model):
    """Expand per-tensor amax to be per channel, where each channel is assigned the per-tensor amax."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer") and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device) * amax
            print(f"expanding {name} {amax} -> {mod._weight_quantizer._amax}")


def recalibrate_weights(model):
    """Recalibrate the weight amaxes by taking the max of the weights."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer"):
            if not hasattr(mod._weight_quantizer, "_amax"):
                print(f"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER")
                continue

            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis)
            reduce_axis = set(range(len(mod.weight.size()))) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True).detach()
            logger.info(f"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}")
            mod._weight_quantizer._amax = amax


def print_model_summary(model, name_width=25, line_width=180, ignore=None):
    """Print model quantization configuration."""
    if ignore is None:
        ignore = []
    elif not isinstance(ignore, list):
        ignore = [ignore]

    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod, "weight"):
            continue
        name_width = max(name_width, len(name))

    for name, mod in model.named_modules():
        input_q = getattr(mod, "_input_quantizer", None)
        weight_q = getattr(mod, "_weight_quantizer", None)
        if not hasattr(mod, "weight"):
            continue
        if type(mod) in ignore:
            continue
        if [True for s in ignore if type(s) is str and s in name]:
            continue
        act_str = f"Act:{input_q.extra_repr()}"
        wgt_str = f"Wgt:{weight_q.extra_repr()}"
        s = f"{name:{name_width}} {act_str} {wgt_str}"
        if len(s) <= line_width:
            logger.info(s)
        else:
            logger.info(f"{name:{name_width}} {act_str}")
            logger.info(f"{' ':{name_width}} {wgt_str}")


def print_quant_summary(model):
    """Print a summary of all TensorQuantizer modules in the model."""
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod, pytorch_quantization.nn.TensorQuantizer):
            print(f"{name:80} {mod}")
            count += 1
    print(f"{count} TensorQuantizers found in model")


def set_quantizer(name, mod, quantizer, k, v):
    """Set attribute k of quantizer of module mod to v."""
    quantizer_mod = getattr(mod, quantizer, None)
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod, k)
        setattr(quantizer_mod, k, v)
    else:
        logger.warning(f"{name} has no {quantizer}")


def set_quantizers(name, mod, which="both", **kwargs):
    """Set quantizer attributes for mod."""
    s = f"Warning: changing {which} quantizers of {name:{qname_width}}"
    for k, v in kwargs.items():
        s += f" {k}={v}"
        if which in ["input", "both"]:
            set_quantizer(name, mod, "_input_quantizer", k, v)
        if which in ["weight", "both"]:
            set_quantizer(name, mod, "_weight_quantizer", k, v)
    logger.info(s)


def set_quantizer_by_name(model, names, **kwargs):
    """Set quantizer attributes for layers whose name contains a substring in names."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_input_quantizer") or hasattr(mod, "_weight_quantizer"):
            for n in names:
                if re.search(n, name):
                    set_quantizers(name, mod, **kwargs)
        elif name.endswith("_quantizer"):
            for n in names:
                if re.search(n, name):
                    s = f"Warning: changing {name:{name_width}}"
                    for k, v in kwargs.items():
                        s += f" {k}={v}"
                        setattr(mod, k, v)
                    logger.info(s)
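# --- illustrative example (not part of quant_trainer above) ---
# The core of `fuse_qkv`: the q/k/v quantizers share one scale by filling each
# per-tensor `_amax` with the max of the three. A toy tensor version of that step
# (plain torch tensors standing in for the quantizer amax buffers):
import torch

q_amax, k_amax, v_amax = torch.tensor(1.5), torch.tensor(2.0), torch.tensor(0.8)
fused = max(q_amax.item(), k_amax.item(), v_amax.item())
for amax in (q_amax, k_amax, v_amax):
    amax.fill_(fused)  # all three now quantize over the same range
print(q_amax.item(), k_amax.item(), v_amax.item())  # 2.0 2.0 2.0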
| 614
| 0
|
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    """simple docstring"""

    def __init__(self, degree, coefficients):
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1.")

        self.coefficients = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_a):
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree, coefficients)

    def __sub__(self, polynomial_a):
        return self + polynomial_a * Polynomial(0, [-1])

    def __neg__(self):
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_a):
        coefficients = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree, coefficients)

    def evaluate(self, substitution):
        result = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self):
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self):
        return self.__str__()

    def derivative(self):
        coefficients = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant=0):
        coefficients = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_a):
        if not isinstance(polynomial_a, Polynomial):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_a):
        return not self.__eq__(polynomial_a)
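# --- illustrative example ---
# A short usage example for the class above; coefficients[i] is the coefficient of
# x^i, so Polynomial(2, [1, 2, 3]) represents 3x^2 + 2x + 1.
if __name__ == "__main__":
    p = Polynomial(2, [1, 2, 3])
    print(p)               # 3x^2 + 2x + 1
    print(p.evaluate(2))   # 3*4 + 2*2 + 1 = 17
    print(p.derivative())  # 6x + 2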
| 297
|
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , a__ , a__=2 , a__=56 , a__=True , a__=True , a__=True , a__=True , a__=99 , a__=32 , a__=2 , a__=2 , a__=7 , a__="gelu_new" , a__=0.1 , a__=0.1 , a__=5_12 , a__=16 , a__=2 , a__=0.02 , a__=4 , a__="block_sparse" , a__=True , a__=False , a__=2 , a__=3 , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = seq_length
_lowerCamelCase = is_training
_lowerCamelCase = use_attention_mask
_lowerCamelCase = use_token_type_ids
_lowerCamelCase = use_labels
_lowerCamelCase = vocab_size
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_act
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = max_position_embeddings
_lowerCamelCase = type_vocab_size
_lowerCamelCase = type_sequence_label_size
_lowerCamelCase = initializer_range
_lowerCamelCase = num_choices
_lowerCamelCase = rescale_embeddings
_lowerCamelCase = attention_type
_lowerCamelCase = use_bias
_lowerCamelCase = block_size
_lowerCamelCase = num_random_blocks
def _UpperCAmelCase ( self ):
_lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCamelCase = None
if self.use_attention_mask:
_lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase = None
if self.use_token_type_ids:
_lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCamelCase = BigBirdConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a__ , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
def _UpperCAmelCase ( self ):
_lowerCamelCase = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = config_and_inputs
_lowerCamelCase = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''attention_mask''': attention_mask,
}
return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest ( FlaxModelTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
    test_attn_probs = False
    test_mismatched_shapes = False
def _UpperCAmelCase ( self ):
        self.model_tester = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def _UpperCAmelCase ( self ):
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def _UpperCAmelCase ( self ):
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def _UpperCAmelCase ( self ):
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def _UpperCAmelCase ( self ):
super().test_hidden_states_output()
@slow
def _UpperCAmelCase ( self ):
for model_class_name in self.all_model_classes:
_lowerCamelCase = model_class_name.from_pretrained('''google/bigbird-roberta-base''' )
self.assertIsNotNone(a__ )
def _UpperCAmelCase ( self ):
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def _UpperCAmelCase ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowerCamelCase = self._prepare_for_class(a__ , a__ )
_lowerCamelCase = model_class(a__ )
@jax.jit
def model_jitted(a__ , a__=None , **a__ ):
return model(input_ids=a__ , attention_mask=a__ , **a__ )
with self.subTest('''JIT Enabled''' ):
_lowerCamelCase = model_jitted(**a__ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
_lowerCamelCase = model_jitted(**a__ ).to_tuple()
self.assertEqual(len(a__ ) , len(a__ ) )
for jitted_output, output in zip(a__ , a__ ):
self.assertEqual(jitted_output.shape , output.shape )
def _UpperCAmelCase ( self , a__ , a__ , a__ , a__=1E-5 , a__="outputs" , a__=None ):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while the PyTorch
        # version makes an effort to return `attention_probs` (yet to be verified).
if name.startswith('''outputs.attentions''' ):
return
else:
super().check_pt_flax_outputs(a__ , a__ , a__ , a__ , a__ , a__ )
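# --- illustrative example (not part of the test above) ---
# The jitted/non-jitted comparison in the test is a general JAX pattern: results under
# `jax.jit` should match eager execution. A tiny standalone version of the same check:
import jax
import jax.numpy as jnp


def toy_fn(x):
    return jnp.tanh(x) * 2.0


x = jnp.arange(4.0)
with jax.disable_jit():
    eager_out = toy_fn(x)
assert jnp.allclose(jax.jit(toy_fn)(x), eager_out)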
| 297
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""albert-base-v1""": 512,
"""albert-large-v1""": 512,
"""albert-xlarge-v1""": 512,
"""albert-xxlarge-v1""": 512,
"""albert-base-v2""": 512,
"""albert-large-v2""": 512,
"""albert-xlarge-v2""": 512,
"""albert-xxlarge-v2""": 512,
}
_a = """▁"""
class AlbertTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer
def __init__( self , __a=None , __a=None , __a=True , __a=True , __a=False , __a="[CLS]" , __a="[SEP]" , __a="<unk>" , __a="[SEP]" , __a="<pad>" , __a="[CLS]" , __a="[MASK]" , **__a , ) -> str:
'''simple docstring'''
        # Mask token behaves like a normal word, i.e. it includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
_UpperCamelCase = (
AddedToken(__a , lstrip=__a , rstrip=__a , normalized=__a)
if isinstance(__a , __a)
else mask_token
)
super().__init__(
__a , tokenizer_file=__a , do_lower_case=__a , remove_space=__a , keep_accents=__a , bos_token=__a , eos_token=__a , unk_token=__a , sep_token=__a , pad_token=__a , cls_token=__a , mask_token=__a , **__a , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1=None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary( self , save_directory , filename_prefix=None ) -> Tuple[str]:
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
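# --- illustrative example (not part of the tokenizer above) ---
# The two special-token methods define ALBERT's sequence-pair layout:
#   tokens:         [CLS] A [SEP] B [SEP]
#   token_type_ids:   0   0   0    1   1
# A plain-Python sketch of the same bookkeeping (ids 2 and 3 are illustrative
# placeholders, not ALBERT's real vocabulary ids):
cls_ids, sep_ids = [2], [3]
a_ids, b_ids = [10, 11], [20]
input_ids = cls_ids + a_ids + sep_ids + b_ids + sep_ids
token_type_ids = len(cls_ids + a_ids + sep_ids) * [0] + len(b_ids + sep_ids) * [1]
assert input_ids == [2, 10, 11, 3, 20, 3]
assert token_type_ids == [0, 0, 0, 0, 1, 1]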
| 19
|
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    """Gives the euclidean distance between two points."""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    """Classifies the point using the KNN algorithm."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
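    # --- illustrative example ---
    # For reference, the same prediction with scikit-learn's built-in estimator; the
    # `classifier` above is a from-scratch equivalent of unweighted k-NN.
    from sklearn.neighbors import KNeighborsClassifier

    sk_knn = KNeighborsClassifier(n_neighbors=5).fit(X_train, y_train)
    print(classes[sk_knn.predict([[4.4, 3.1, 1.3, 1.4]])[0]])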
| 507
| 0
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_:Optional[int] = {
"""BAAI/AltCLIP""": """https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json""",
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig( PretrainedConfig ):
'''simple docstring'''
__lowerCamelCase : Dict = "altclip_text_model"
def __init__( self, lowerCamelCase__=25_0002, lowerCamelCase__=1024, lowerCamelCase__=24, lowerCamelCase__=16, lowerCamelCase__=4096, lowerCamelCase__="gelu", lowerCamelCase__=0.1, lowerCamelCase__=0.1, lowerCamelCase__=514, lowerCamelCase__=1, lowerCamelCase__=0.02, lowerCamelCase__=0.02, lowerCamelCase__=1e-05, lowerCamelCase__=1, lowerCamelCase__=0, lowerCamelCase__=2, lowerCamelCase__="absolute", lowerCamelCase__=True, lowerCamelCase__=768, **lowerCamelCase__, ):
super().__init__(pad_token_id=__A, bos_token_id=__A, eos_token_id=__A, **__A )
A : Any = vocab_size
A : Optional[Any] = hidden_size
A : Union[str, Any] = num_hidden_layers
A : Optional[Any] = num_attention_heads
A : int = hidden_act
A : int = intermediate_size
A : int = hidden_dropout_prob
A : str = attention_probs_dropout_prob
A : List[str] = max_position_embeddings
A : Optional[int] = type_vocab_size
A : List[str] = initializer_range
A : Optional[int] = initializer_factor
A : str = layer_norm_eps
A : List[str] = position_embedding_type
A : int = use_cache
A : str = project_dim
class AltCLIPVisionConfig( PretrainedConfig ):
'''simple docstring'''
__lowerCamelCase : Optional[Any] = "altclip_vision_model"
def __init__( self, lowerCamelCase__=768, lowerCamelCase__=3072, lowerCamelCase__=512, lowerCamelCase__=12, lowerCamelCase__=12, lowerCamelCase__=3, lowerCamelCase__=224, lowerCamelCase__=32, lowerCamelCase__="quick_gelu", lowerCamelCase__=1e-5, lowerCamelCase__=0.0, lowerCamelCase__=0.02, lowerCamelCase__=1.0, **lowerCamelCase__, ):
super().__init__(**__A )
A : Union[str, Any] = hidden_size
A : int = intermediate_size
A : List[Any] = projection_dim
A : Optional[int] = num_hidden_layers
A : Optional[int] = num_attention_heads
A : List[str] = num_channels
A : Tuple = patch_size
A : str = image_size
A : int = initializer_range
A : str = initializer_factor
A : Tuple = attention_dropout
A : Union[str, Any] = layer_norm_eps
A : Any = hidden_act
@classmethod
def _lowerCAmelCase ( cls, lowerCamelCase__, **lowerCamelCase__ ):
cls._set_token_in_kwargs(__A )
A , A : List[Any] = cls.get_config_dict(__A, **__A )
# get the vision config dict if we are loading from AltCLIPConfig
if config_dict.get("""model_type""" ) == "altclip":
A : Dict = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A, **__A )
class AltCLIPConfig( PretrainedConfig ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = "altclip"
__lowerCamelCase : Optional[int] = True
def __init__( self, lowerCamelCase__=None, lowerCamelCase__=None, lowerCamelCase__=768, lowerCamelCase__=2.6592, **lowerCamelCase__ ):
# If `_config_dict` exist, we use them for the backward compatibility.
# We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
# of confusion!).
A : Tuple = kwargs.pop("""text_config_dict""", __A )
A : Optional[int] = kwargs.pop("""vision_config_dict""", __A )
super().__init__(**__A )
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}
            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()
            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. "
                            f'The value `text_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)
            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)
        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}
            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }
            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)
            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)
        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `AltCLIPVisionConfig` with default values.")
        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
    @classmethod
    def from_text_vision_configs(cls, text_config: AltCLIPTextConfig, vision_config: AltCLIPVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
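# --- Usage sketch (added for illustration; it only exercises the classmethods
# defined above, all values below are arbitrary examples):
# text_config = AltCLIPTextConfig(project_dim=768)
# vision_config = AltCLIPVisionConfig(image_size=224, patch_size=32)
# config = AltCLIPConfig.from_text_vision_configs(text_config, vision_config)
# assert config.to_dict()["vision_config"]["patch_size"] == 32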
| 718
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class ConvNextImageProcessor(BaseImageProcessor):
    """Image processor with ConvNeXT-style shortest-edge resize and crop_pct handling."""

    model_input_names = ["pixel_values"]
    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, crop_pct: float = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], crop_pct: float, resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]
        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, crop_pct: float = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
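# --- Usage sketch (added; assumes the processor above behaves like transformers'
# ConvNextImageProcessor; the random array stands in for a real image):
# import numpy as np
# processor = ConvNextImageProcessor(size={"shortest_edge": 384})
# image = (np.random.rand(480, 640, 3) * 255).astype("uint8")  # HWC uint8 input
# batch = processor.preprocess(image)
# print(batch["pixel_values"][0].shape)  # (3, 384, 384): warped, no crop at >= 384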
| 520
| 0
|
"""simple docstring"""
def A_ ( snake_case_ : int ,snake_case_ : int ):
'''simple docstring'''
return int(input_a == input_a == 0 )
def A_ ( ):
'''simple docstring'''
print("""Truth Table of NOR Gate:""" )
print("""| Input 1 | Input 2 | Output |""" )
print(f'| 0 | 0 | {nor_gate(0 ,0 )} |' )
print(f'| 0 | 1 | {nor_gate(0 ,1 )} |' )
print(f'| 1 | 0 | {nor_gate(1 ,0 )} |' )
print(f'| 1 | 1 | {nor_gate(1 ,1 )} |' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
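# NOR is functionally complete; a small added sketch (not original code) builds
# NOT, OR and AND from nor_gate alone.
def not_gate(a: int) -> int:
    return nor_gate(a, a)


def or_gate(a: int, b: int) -> int:
    return not_gate(nor_gate(a, b))


def and_gate(a: int, b: int) -> int:
    return nor_gate(not_gate(a), not_gate(b))


if __name__ == "__main__":
    assert [and_gate(a, b) for a in (0, 1) for b in (0, 1)] == [0, 0, 0, 1]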
| 499
|
"""simple docstring"""
def A_ ( snake_case_ : int = 1_0_0_0_0_0_0 ):
'''simple docstring'''
UpperCamelCase : List[Any] = [i - 1 for i in range(limit + 1 )]
for i in range(2 ,limit + 1 ):
if phi[i] == i - 1:
for j in range(2 * i ,limit + 1 ,snake_case_ ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
print(solution())
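# Added note: sum(phi(2..n)) also counts the reduced proper fractions a/b with
# 0 < a/b < 1 and b <= n (the Farey sequence F_n minus its endpoints); a quick
# brute-force cross-check on a small limit:
if __name__ == "__main__":
    from fractions import Fraction

    assert len({Fraction(a, b) for b in range(2, 9) for a in range(1, b)}) == solution(8)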
| 499
| 1
|
"""simple docstring"""
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
TARGET_FEATURE_LENGTH = 256
class SpectrogramDiffusionPipeline(DiffusionPipeline):
    """Pipeline generating audio spectrograms from encoded note tokens, vocoded with MelGAN."""

    _optional_components = ["melgan"]
def __init__( self , _A , _A , _A , _A , _A , ):
'''simple docstring'''
super().__init__()
# From MELGAN
_SCREAMING_SNAKE_CASE =math.log(1E-5 ) # Matches MelGAN training.
_SCREAMING_SNAKE_CASE =4.0 # Largest value for most examples
_SCREAMING_SNAKE_CASE =1_2_8
self.register_modules(
notes_encoder=_A , continuous_encoder=_A , decoder=_A , scheduler=_A , melgan=_A , )
def UpperCamelCase_ ( self , _A , _A=(-1.0, 1.0) , _A=False ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =output_range
if clip:
_SCREAMING_SNAKE_CASE =torch.clip(_A , self.min_value , self.max_value )
# Scale to [0, 1].
_SCREAMING_SNAKE_CASE =(features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def UpperCamelCase_ ( self , _A , _A=(-1.0, 1.0) , _A=False ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =input_range
_SCREAMING_SNAKE_CASE =torch.clip(_A , _A , _A ) if clip else outputs
# Scale to [0, 1].
_SCREAMING_SNAKE_CASE =(outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def UpperCamelCase_ ( self , _A , _A , _A ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =input_tokens > 0
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.notes_encoder(
encoder_input_tokens=_A , encoder_inputs_mask=_A )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.continuous_encoder(
encoder_inputs=_A , encoder_inputs_mask=_A )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def UpperCamelCase_ ( self , _A , _A , _A ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =noise_time
if not torch.is_tensor(_A ):
_SCREAMING_SNAKE_CASE =torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(_A ) and len(timesteps.shape ) == 0:
_SCREAMING_SNAKE_CASE =timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_SCREAMING_SNAKE_CASE =timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
_SCREAMING_SNAKE_CASE =self.decoder(
encodings_and_masks=_A , decoder_input_tokens=_A , decoder_noise_time=_A )
return logits
@torch.no_grad()
def __call__( self , _A , _A = None , _A = 1_0_0 , _A = True , _A = "numpy" , _A = None , _A = 1 , ):
'''simple docstring'''
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_A , _A ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(_A )}.""" )
_SCREAMING_SNAKE_CASE =np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE =np.zeros([1, 0, self.n_dims] , np.floataa )
_SCREAMING_SNAKE_CASE =torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=_A , device=self.device )
for i, encoder_input_tokens in enumerate(_A ):
if i == 0:
_SCREAMING_SNAKE_CASE =torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
_SCREAMING_SNAKE_CASE =torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=_A , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
_SCREAMING_SNAKE_CASE =ones
_SCREAMING_SNAKE_CASE =self.scale_features(
_A , output_range=[-1.0, 1.0] , clip=_A )
_SCREAMING_SNAKE_CASE =self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=_A , continuous_mask=_A , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
_SCREAMING_SNAKE_CASE =randn_tensor(
shape=encoder_continuous_inputs.shape , generator=_A , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(_A )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
_SCREAMING_SNAKE_CASE =self.decode(
encodings_and_masks=_A , input_tokens=_A , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
_SCREAMING_SNAKE_CASE =self.scheduler.step(_A , _A , _A , generator=_A ).prev_sample
_SCREAMING_SNAKE_CASE =self.scale_to_features(_A , input_range=[-1.0, 1.0] )
_SCREAMING_SNAKE_CASE =mel[:1]
_SCREAMING_SNAKE_CASE =mel.cpu().float().numpy()
_SCREAMING_SNAKE_CASE =np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_A , _A )
logger.info('''Generated segment''' , _A )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
'''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''' )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
'''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''' )
if output_type == "numpy":
_SCREAMING_SNAKE_CASE =self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
_SCREAMING_SNAKE_CASE =full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=_A )
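# --- Usage sketch (added; everything below is an assumption for illustration).
# If this matches diffusers' spectrogram-diffusion pipeline, generation from
# already-encoded note tokens would look roughly like:
# pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion")
# output = pipe(input_tokens, num_inference_steps=100, output_type="numpy")
# audio = output.audios  # waveform rendered by the MelGAN vocoder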
| 165
|
"""simple docstring"""
def _lowerCAmelCase(a : int ) -> bool:
if p < 2:
raise ValueError('''p should not be less than 2!''' )
elif p == 2:
return True
_SCREAMING_SNAKE_CASE =4
_SCREAMING_SNAKE_CASE =(1 << p) - 1
for _ in range(p - 2 ):
_SCREAMING_SNAKE_CASE =((s * s) - 2) % m
return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(1_1))
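# Added sketch: scan small prime exponents for Mersenne primes with the test
# above; _is_prime is an illustrative helper, not part of the original file.
def _is_prime(n: int) -> bool:
    return n > 1 and all(n % d for d in range(2, int(n**0.5) + 1))


if __name__ == "__main__":
    # Known Mersenne prime exponents up to 31: 2, 3, 5, 7, 13, 17, 19, 31
    print([p for p in range(2, 32) if _is_prime(p) and lucas_lehmer_test(p)])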
| 165
| 1
|
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Elementwise logistic sigmoid: 1 / (1 + e^-x)."""
    return 1 / (1 + np.exp(-vector))


def swish(vector: np.ndarray) -> np.ndarray:
    """SiLU / swish activation: x * sigmoid(x)."""
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
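# Added note: swish (SiLU) is smooth and slightly non-monotonic just below
# zero, unlike ReLU; a quick elementwise check:
if __name__ == "__main__":
    print(swish(np.array([-1.0, 0.0, 1.0])))  # approx [-0.26894142  0.  0.73105858]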
| 204
|
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("""At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training""")
# TF training parameters
USE_XLA = False
USE_AMP = False
def UpperCamelCase ( __lowerCamelCase : Namespace ):
return TrainCommand(__lowerCamelCase )
class TrainCommand(BaseTransformersCLICommand):
@staticmethod
def _SCREAMING_SNAKE_CASE (snake_case__ : ArgumentParser ) -> int:
'''simple docstring'''
snake_case : Any = parser.add_parser("train" , help="CLI tool to train a model on a task." )
train_parser.add_argument(
"--train_data" , type=snake_case__ , required=snake_case__ , help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences." , )
train_parser.add_argument(
"--column_label" , type=snake_case__ , default=0 , help="Column of the dataset csv file with example labels." )
train_parser.add_argument(
"--column_text" , type=snake_case__ , default=1 , help="Column of the dataset csv file with example texts." )
train_parser.add_argument(
"--column_id" , type=snake_case__ , default=2 , help="Column of the dataset csv file with example ids." )
train_parser.add_argument(
"--skip_first_row" , action="store_true" , help="Skip the first row of the csv file (headers)." )
train_parser.add_argument("--validation_data" , type=snake_case__ , default="" , help="path to validation dataset." )
train_parser.add_argument(
"--validation_split" , type=snake_case__ , default=0.1 , help="if validation dataset is not provided, fraction of train dataset to use as validation dataset." , )
train_parser.add_argument("--output" , type=snake_case__ , default="./" , help="path to saved the trained model." )
train_parser.add_argument(
"--task" , type=snake_case__ , default="text_classification" , help="Task to train the model on." )
train_parser.add_argument(
"--model" , type=snake_case__ , default="bert-base-uncased" , help="Model's name or path to stored model." )
train_parser.add_argument("--train_batch_size" , type=snake_case__ , default=32 , help="Batch size for training." )
train_parser.add_argument("--valid_batch_size" , type=snake_case__ , default=64 , help="Batch size for validation." )
train_parser.add_argument("--learning_rate" , type=snake_case__ , default=3e-5 , help="Learning rate." )
train_parser.add_argument("--adam_epsilon" , type=snake_case__ , default=1e-08 , help="Epsilon for Adam optimizer." )
train_parser.set_defaults(func=snake_case__ )
def __init__(self : List[Any] , snake_case__ : Namespace ) -> Tuple:
'''simple docstring'''
snake_case : Any = logging.get_logger("transformers-cli/training" )
snake_case : List[Any] = "tf" if is_tf_available() else "torch"
os.makedirs(args.output , exist_ok=snake_case__ )
snake_case : Any = args.output
snake_case : List[Any] = args.column_label
snake_case : Tuple = args.column_text
snake_case : str = args.column_id
self.logger.info(f"""Loading {args.task} pipeline for {args.model}""" )
if args.task == "text_classification":
snake_case : Optional[int] = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f"""Loading dataset from {args.train_data}""" )
snake_case : Tuple = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
snake_case : Optional[Any] = None
if args.validation_data:
self.logger.info(f"""Loading validation dataset from {args.validation_data}""" )
snake_case : Dict = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
snake_case : Union[str, Any] = args.validation_split
snake_case : Optional[Any] = args.train_batch_size
snake_case : List[str] = args.valid_batch_size
snake_case : List[Any] = args.learning_rate
snake_case : List[Any] = args.adam_epsilon
def _SCREAMING_SNAKE_CASE (self : int ) -> List[str]:
'''simple docstring'''
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Optional[Any]:
'''simple docstring'''
raise NotImplementedError
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Tuple:
'''simple docstring'''
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
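# --- Illustrative invocation (added; the data path is a placeholder, the flags
# come from the argparse definitions above):
#   transformers-cli train \
#       --task text_classification \
#       --model bert-base-uncased \
#       --train_data ./train.tsv --column_label 0 --column_text 1 --column_id 2 \
#       --validation_split 0.1 --train_batch_size 32 --learning_rate 3e-5 \
#       --output ./trained_model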
| 204
| 1
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
def A ( self : List[Any] )-> int:
torch.manual_seed(0 )
__UpperCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
__UpperCamelCase = DDIMScheduler()
torch.manual_seed(0 )
__UpperCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
__UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
__UpperCamelCase = CLIPTextModel(A_ )
__UpperCamelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
__UpperCamelCase = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def A ( self : Dict , A_ : str , A_ : Tuple=0 )-> Tuple:
__UpperCamelCase = torch.manual_seed(A_ )
__UpperCamelCase = {
"prompt": "a photo of the dolomites",
"generator": generator,
# Setting height and width to None to prevent OOMs on CPU.
"height": None,
"width": None,
"num_inference_steps": 1,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def A ( self : str )-> List[Any]:
__UpperCamelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase = self.get_dummy_components()
__UpperCamelCase = StableDiffusionPanoramaPipeline(**A_ )
__UpperCamelCase = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
__UpperCamelCase = self.get_dummy_inputs(A_ )
__UpperCamelCase = sd_pipe(**A_ ).images
__UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__UpperCamelCase = np.array([0.6_186, 0.5_374, 0.4_915, 0.4_135, 0.4_114, 0.4_563, 0.5_128, 0.4_977, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A ( self : Dict )-> List[str]:
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def A ( self : Dict )-> Union[str, Any]:
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.2_5e-3 )
def A ( self : Optional[int] )-> Dict:
__UpperCamelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase = self.get_dummy_components()
__UpperCamelCase = StableDiffusionPanoramaPipeline(**A_ )
__UpperCamelCase = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
__UpperCamelCase = self.get_dummy_inputs(A_ )
__UpperCamelCase = "french fries"
__UpperCamelCase = sd_pipe(**A_ , negative_prompt=A_ )
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__UpperCamelCase = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A ( self : str )-> int:
__UpperCamelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase = self.get_dummy_components()
__UpperCamelCase = StableDiffusionPanoramaPipeline(**A_ )
__UpperCamelCase = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
__UpperCamelCase = self.get_dummy_inputs(A_ )
__UpperCamelCase = sd_pipe(**A_ , view_batch_size=2 )
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__UpperCamelCase = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A ( self : List[Any] )-> List[str]:
__UpperCamelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase = self.get_dummy_components()
__UpperCamelCase = EulerAncestralDiscreteScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" )
__UpperCamelCase = StableDiffusionPanoramaPipeline(**A_ )
__UpperCamelCase = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
__UpperCamelCase = self.get_dummy_inputs(A_ )
__UpperCamelCase = sd_pipe(**A_ ).images
__UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__UpperCamelCase = np.array([0.4_024, 0.6_510, 0.4_901, 0.5_378, 0.5_813, 0.5_622, 0.4_795, 0.4_467, 0.4_952] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A ( self : Optional[int] )-> Optional[int]:
__UpperCamelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase = self.get_dummy_components()
__UpperCamelCase = PNDMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , skip_prk_steps=A_ )
__UpperCamelCase = StableDiffusionPanoramaPipeline(**A_ )
__UpperCamelCase = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
__UpperCamelCase = self.get_dummy_inputs(A_ )
__UpperCamelCase = sd_pipe(**A_ ).images
__UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__UpperCamelCase = np.array([0.6_391, 0.6_291, 0.4_861, 0.5_134, 0.5_552, 0.4_578, 0.5_032, 0.5_023, 0.4_539] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class __UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def A ( self : int )-> Any:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self : List[str] , A_ : Tuple=0 )-> List[Any]:
__UpperCamelCase = torch.manual_seed(A_ )
__UpperCamelCase = {
"prompt": "a photo of the dolomites",
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def A ( self : str )-> Union[str, Any]:
__UpperCamelCase = "stabilityai/stable-diffusion-2-base"
__UpperCamelCase = DDIMScheduler.from_pretrained(A_ , subfolder="scheduler" )
__UpperCamelCase = StableDiffusionPanoramaPipeline.from_pretrained(A_ , scheduler=A_ , safety_checker=A_ )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
pipe.enable_attention_slicing()
__UpperCamelCase = self.get_inputs()
__UpperCamelCase = pipe(**A_ ).images
__UpperCamelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 20_48, 3)
__UpperCamelCase = np.array(
[
0.36_968_392,
0.27_025_372,
0.32_446_766,
0.28_379_387,
0.36_363_274,
0.30_733_347,
0.27_100_027,
0.27_054_125,
0.25_536_096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-2
def A ( self : int )-> Optional[int]:
__UpperCamelCase = StableDiffusionPanoramaPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-base" , safety_checker=A_ )
__UpperCamelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
pipe.enable_attention_slicing()
__UpperCamelCase = self.get_inputs()
__UpperCamelCase = pipe(**A_ ).images
__UpperCamelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 20_48, 3)
__UpperCamelCase = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def A ( self : Union[str, Any] )-> Union[str, Any]:
__UpperCamelCase = 0
def callback_fn(A_ : int , A_ : int , A_ : torch.FloatTensor ) -> None:
__UpperCamelCase = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
__UpperCamelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 2_56)
__UpperCamelCase = latents[0, -3:, -3:, -1]
__UpperCamelCase = np.array(
[
0.18_681_869,
0.33_907_816,
0.5_361_276,
0.14_432_865,
-0.02_856_611,
-0.73_941_123,
0.23_397_987,
0.47_322_682,
-0.37_823_164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
__UpperCamelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 2_56)
__UpperCamelCase = latents[0, -3:, -3:, -1]
__UpperCamelCase = np.array(
[
0.18_539_645,
0.33_987_248,
0.5_378_559,
0.14_437_142,
-0.02_455_261,
-0.7_338_317,
0.23_990_755,
0.47_356_272,
-0.3_786_505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
__UpperCamelCase = False
__UpperCamelCase = "stabilityai/stable-diffusion-2-base"
__UpperCamelCase = DDIMScheduler.from_pretrained(A_ , subfolder="scheduler" )
__UpperCamelCase = StableDiffusionPanoramaPipeline.from_pretrained(A_ , scheduler=A_ , safety_checker=A_ )
__UpperCamelCase = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
pipe.enable_attention_slicing()
__UpperCamelCase = self.get_inputs()
pipe(**A_ , callback=A_ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def A ( self : int )-> int:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__UpperCamelCase = "stabilityai/stable-diffusion-2-base"
__UpperCamelCase = DDIMScheduler.from_pretrained(A_ , subfolder="scheduler" )
__UpperCamelCase = StableDiffusionPanoramaPipeline.from_pretrained(A_ , scheduler=A_ , safety_checker=A_ )
__UpperCamelCase = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__UpperCamelCase = self.get_inputs()
__UpperCamelCase = pipe(**A_ )
__UpperCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 5.2 GB is allocated
assert mem_bytes < 5.5 * 10**9
| 712
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    """Output of the prior transformer: the predicted CLIP image embedding."""

    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
    """A prior transformer predicting CLIP image embeddings from text embeddings."""
@register_to_config
def __init__( self : Optional[Any] , A_ : int = 32 , A_ : int = 64 , A_ : int = 20 , A_ : int = 7_68 , A_ : Dict=77 , A_ : Union[str, Any]=4 , A_ : float = 0.0 , A_ : str = "silu" , A_ : Optional[str] = None , A_ : Optional[str] = None , A_ : Optional[str] = "linear" , A_ : Optional[str] = "prd" , A_ : Optional[int] = None , A_ : Optional[int] = None , A_ : Optional[int] = None , )-> Optional[int]:
super().__init__()
__UpperCamelCase = num_attention_heads
__UpperCamelCase = attention_head_dim
__UpperCamelCase = num_attention_heads * attention_head_dim
__UpperCamelCase = additional_embeddings
__UpperCamelCase = time_embed_dim or inner_dim
__UpperCamelCase = embedding_proj_dim or embedding_dim
__UpperCamelCase = clip_embed_dim or embedding_dim
__UpperCamelCase = Timesteps(A_ , A_ , 0 )
__UpperCamelCase = TimestepEmbedding(A_ , A_ , out_dim=A_ , act_fn=A_ )
__UpperCamelCase = nn.Linear(A_ , A_ )
if embedding_proj_norm_type is None:
__UpperCamelCase = None
elif embedding_proj_norm_type == "layer":
__UpperCamelCase = nn.LayerNorm(A_ )
else:
raise ValueError(f"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
__UpperCamelCase = nn.Linear(A_ , A_ )
if encoder_hid_proj_type is None:
__UpperCamelCase = None
elif encoder_hid_proj_type == "linear":
__UpperCamelCase = nn.Linear(A_ , A_ )
else:
raise ValueError(f"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
__UpperCamelCase = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , A_ ) )
if added_emb_type == "prd":
__UpperCamelCase = nn.Parameter(torch.zeros(1 , 1 , A_ ) )
elif added_emb_type is None:
__UpperCamelCase = None
else:
raise ValueError(
f"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
__UpperCamelCase = nn.ModuleList(
[
BasicTransformerBlock(
A_ , A_ , A_ , dropout=A_ , activation_fn="gelu" , attention_bias=A_ , )
for d in range(A_ )
] )
if norm_in_type == "layer":
__UpperCamelCase = nn.LayerNorm(A_ )
elif norm_in_type is None:
__UpperCamelCase = None
else:
raise ValueError(f"""Unsupported norm_in_type: {norm_in_type}.""" )
__UpperCamelCase = nn.LayerNorm(A_ )
__UpperCamelCase = nn.Linear(A_ , A_ )
__UpperCamelCase = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10_000.0 )
causal_attention_mask.triu_(1 )
__UpperCamelCase = causal_attention_mask[None, ...]
self.register_buffer("causal_attention_mask" , A_ , persistent=A_ )
__UpperCamelCase = nn.Parameter(torch.zeros(1 , A_ ) )
__UpperCamelCase = nn.Parameter(torch.zeros(1 , A_ ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def A ( self : Tuple )-> Dict[str, AttentionProcessor]:
__UpperCamelCase = {}
def fn_recursive_add_processors(A_ : str , A_ : torch.nn.Module , A_ : Dict[str, AttentionProcessor] ):
if hasattr(A_ , "set_processor" ):
__UpperCamelCase = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"""{name}.{sub_name}""" , A_ , A_ )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(A_ , A_ , A_ )
return processors
def A ( self : Tuple , A_ : Union[AttentionProcessor, Dict[str, AttentionProcessor]] )-> Optional[int]:
__UpperCamelCase = len(self.attn_processors.keys() )
if isinstance(A_ , A_ ) and len(A_ ) != count:
raise ValueError(
f"""A dict of processors was passed, but the number of processors {len(A_ )} does not match the"""
f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(A_ : str , A_ : torch.nn.Module , A_ : Any ):
if hasattr(A_ , "set_processor" ):
if not isinstance(A_ , A_ ):
module.set_processor(A_ )
else:
module.set_processor(processor.pop(f"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"""{name}.{sub_name}""" , A_ , A_ )
for name, module in self.named_children():
fn_recursive_attn_processor(A_ , A_ , A_ )
def A ( self : List[str] )-> List[str]:
self.set_attn_processor(AttnProcessor() )
def A ( self : Dict , A_ : str , A_ : Union[torch.Tensor, float, int] , A_ : torch.FloatTensor , A_ : Optional[torch.FloatTensor] = None , A_ : Optional[torch.BoolTensor] = None , A_ : bool = True , )-> Any:
__UpperCamelCase = hidden_states.shape[0]
__UpperCamelCase = timestep
if not torch.is_tensor(A_ ):
__UpperCamelCase = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(A_ ) and len(timesteps.shape ) == 0:
__UpperCamelCase = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__UpperCamelCase = timesteps * torch.ones(A_ , dtype=timesteps.dtype , device=timesteps.device )
__UpperCamelCase = self.time_proj(A_ )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
__UpperCamelCase = timesteps_projected.to(dtype=self.dtype )
__UpperCamelCase = self.time_embedding(A_ )
if self.embedding_proj_norm is not None:
__UpperCamelCase = self.embedding_proj_norm(A_ )
__UpperCamelCase = self.embedding_proj(A_ )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
__UpperCamelCase = self.encoder_hidden_states_proj(A_ )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set" )
__UpperCamelCase = self.proj_in(A_ )
__UpperCamelCase = self.positional_embedding.to(hidden_states.dtype )
__UpperCamelCase = []
__UpperCamelCase = 0
if encoder_hidden_states is not None:
additional_embeds.append(A_ )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
__UpperCamelCase = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
__UpperCamelCase = hidden_states[:, None, :]
__UpperCamelCase = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
__UpperCamelCase = self.prd_embedding.to(hidden_states.dtype ).expand(A_ , -1 , -1 )
additional_embeds.append(A_ )
__UpperCamelCase = torch.cat(
A_ , dim=1 , )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
__UpperCamelCase = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
__UpperCamelCase = F.pad(
A_ , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
__UpperCamelCase = hidden_states + positional_embeddings
if attention_mask is not None:
__UpperCamelCase = (1 - attention_mask.to(hidden_states.dtype )) * -10_000.0
__UpperCamelCase = F.pad(A_ , (0, self.additional_embeddings) , value=0.0 )
__UpperCamelCase = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
__UpperCamelCase = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
__UpperCamelCase = self.norm_in(A_ )
for block in self.transformer_blocks:
__UpperCamelCase = block(A_ , attention_mask=A_ )
__UpperCamelCase = self.norm_out(A_ )
if self.prd_embedding is not None:
__UpperCamelCase = hidden_states[:, -1]
else:
__UpperCamelCase = hidden_states[:, additional_embeddings_len:]
__UpperCamelCase = self.proj_to_clip_embeddings(A_ )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=A_ )
    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
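# --- Usage sketch (added): a tiny randomly initialized instance of the module
# above can be smoke-tested roughly as below; all sizes are illustrative, not
# checkpoint defaults, and the encoder length must equal num_embeddings.
# import torch
# prior = PriorTransformer(num_attention_heads=2, attention_head_dim=4, num_layers=2, embedding_dim=8, num_embeddings=4, additional_embeddings=4)
# out = prior(
#     hidden_states=torch.randn(1, 8),
#     timestep=1,
#     proj_embedding=torch.randn(1, 8),
#     encoder_hidden_states=torch.randn(1, 4, 8),
# )
# print(out.predicted_image_embedding.shape)  # expected: (1, 8)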
| 228
| 0
|
import operator
def strand_sort(arr: list, reverse: bool = False, solution: list = None) -> list:
    """
    Strand sort: repeatedly peel an ordered "strand" off the input and merge it
    into the solution list. Mutates ``arr``.
    >>> strand_sort([4, 3, 5, 1, 2])
    [1, 2, 3, 4, 5]
    """
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)
    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)
    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
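# Added sanity check: strand sort should agree with the built-in sort on random
# data; the worst case is O(n^2), when every pass peels off a one-element strand.
if __name__ == "__main__":
    import random

    data = random.sample(range(100), 20)
    assert strand_sort(list(data)) == sorted(data)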
| 500
|
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(self, vocab_size=50400, n_positions=2048, n_embd=4096, n_layer=28, n_head=16, rotary_dim=64, n_inner=None, activation_function="gelu_new", resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=50256, eos_token_id=50256, tie_word_embeddings=False, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(self, config: PretrainedConfig, task: str = "default", patching_specs: List[PatchingSpec] = None, use_past: bool = False):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs
    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head
    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
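# --- Usage sketch (added; the checkpoint name is the standard GPT-J identifier,
# batch/sequence sizes are illustrative):
# from transformers import AutoTokenizer
# tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
# onnx_config = GPTJOnnxConfig(GPTJConfig(), task="default", use_past=False)
# dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH)
# print(list(dummy))  # ['input_ids', 'attention_mask']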
| 500
| 1
|
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return BioGptConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        model = BioGptForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_biogpt_model_attention_mask_past(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()
        # create attention mask
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0
        # first forward pass
        output, past_key_values = model(input_ids, attention_mask=attn_mask).to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens
        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1,
        )
        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values, attention_mask=attn_mask)["last_hidden_state"]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_biogpt_model_past_large_inputs(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = BioGptModel(config=config).to(torch_device).eval()
        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_forward_and_backwards(self, config, input_ids, input_mask, head_mask, token_type_ids, *args, gradient_checkpointing=False):
        model = BioGptForCausalLM(config)
        model.to(torch_device)
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()
        result = model(input_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        result.loss.backward()

    def create_and_check_biogpt_weight_initialization(self, config, *args):
        model = BioGptModel(config)
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)

    def create_and_check_biogpt_for_token_classification(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = BioGptForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False

    def setUp(self):
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_att_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)

    def test_biogpt_gradient_checkpointing(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)

    def test_biogpt_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)

    def test_biogpt_weight_initialization(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)

    def test_biogpt_token_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)
@slow
def A_ (self ) -> Tuple:
model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
model.to(torch_device)

tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

tokenizer.padding_side = "left"

# Define PAD Token = EOS Token = 50256
tokenizer.pad_token = tokenizer.eos_token
model.config.pad_token_id = model.config.eos_token_id
# use different length sentences to test batching
sentences = [
    "Hello, my dog is a little",
    "Today, I",
]
inputs = tokenizer(sentences, return_tensors="pt", padding=True)
input_ids = inputs["input_ids"].to(torch_device)

outputs = model.generate(
    input_ids=input_ids,
    attention_mask=inputs["attention_mask"].to(torch_device),
)

inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
output_non_padded = model.generate(input_ids=inputs_non_padded)

num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

expected_output_sentence = [
    "Hello, my dog is a little bit bigger than a little bit.",
    "Today, I have a good idea of how to use the information",
]
self.assertListEqual(expected_output_sentence, batch_out_sentence)
self.assertListEqual(batch_out_sentence, [non_padded_sentence, padded_sentence])
@slow
def A_ (self ) -> Tuple:
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
    model = BioGptModel.from_pretrained(model_name)
    self.assertIsNotNone(model)
def A_ (self ) -> Optional[Any]:
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.num_labels = 3
input_ids = input_dict["input_ids"]
attention_mask = input_ids.ne(1).to(torch_device)
sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
model = BioGptForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def A_ (self ) -> Optional[Any]:
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.num_labels = 3
config.problem_type = "multi_label_classification"
input_ids = input_dict["input_ids"]
attention_mask = input_ids.ne(1).to(torch_device)
sequence_labels = ids_tensor(
    [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
).to(torch.float)
model = BioGptForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class UpperCamelCase ( unittest.TestCase ):
@slow
def A_ (self ) -> Any:
model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
input_ids = torch.tensor([[2, 4_805, 9, 656, 21]])
output = model(input_ids)[0]
vocab_size = 42_384
expected_shape = torch.Size((1, 5, vocab_size))
self.assertEqual(output.shape, expected_shape)
expected_slice = torch.tensor(
    [[[-9.5_236, -9.8_918, 10.4_557], [-11.0_469, -9.6_423, 8.1_022], [-8.8_664, -7.8_826, 5.5_325]]]
)
self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@slow
def A_ (self ) -> Optional[int]:
tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
model.to(torch_device)
torch.manual_seed(0)
tokenized = tokenizer("COVID-19 is", return_tensors="pt").to(torch_device)
output_ids = model.generate(
    **tokenized,
    min_length=100,
    max_length=1_024,
    num_beams=5,
    early_stopping=True,
)
output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)
expected_output_str = (
"""COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"""
""" causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"""
""" territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"""
""" and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"""
""" more than 800,000 deaths."""
)
self.assertEqual(output_str, expected_output_str)
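
# Note (added for clarity): the two slow tests above pin down end-to-end behaviour —
# batched generation relies on left padding with pad == eos (BioGPT ships no pad token,
# and a decoder-only model must continue from the right-most real token of every row),
# and beam search is seeded with torch.manual_seed(0) so the decoded string is reproducible.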
| 138
|
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = TaConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = TaForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
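
# Example invocation (added; script name and file paths are hypothetical, for illustration only):
#
#   python convert_t5_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./t5/model.ckpt \
#       --config_file ./t5/config.json \
#       --pytorch_dump_path ./t5-pytorch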
| 138
| 1
|
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        return {}


def get_html_strings():
    html_string_1 = "<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR=\"FFFFFF\">\n <HR>\n <a href=\"http://google.com\">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style=\"color:#0000FF\">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>"
    html_string_2 = "\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n "
    return [html_string_1, html_string_2]
@require_bsa
class __magic_name__ ( snake_case , unittest.TestCase ):
UpperCamelCase_ :Optional[Any] = MarkupLMFeatureExtractor if is_bsa_available() else None
def setUp(self):
    self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)
@property
def feat_extract_dict(self):
    return self.feature_extract_tester.prepare_feat_extract_dict()
def test_call(self):
    # Initialize feature_extractor
    feature_extractor = self.feature_extraction_class()

    # Test not batched input
    html_string = get_html_strings()[0]
    encoding = feature_extractor(html_string)

    # fmt: off
    expected_nodes = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
    expected_xpaths = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
    # fmt: on
    self.assertEqual(encoding.nodes, expected_nodes)
    self.assertEqual(encoding.xpaths, expected_xpaths)

    # Test batched
    html_strings = get_html_strings()
    encoding = feature_extractor(html_strings)

    # fmt: off
    expected_nodes = expected_nodes + [["My First Heading", "My first paragraph."]]
    expected_xpaths = expected_xpaths + [["/html/body/h1", "/html/body/p"]]
    self.assertEqual(len(encoding.nodes), 2)
    self.assertEqual(len(encoding.xpaths), 2)
    self.assertEqual(encoding.nodes, expected_nodes)
    self.assertEqual(encoding.xpaths, expected_xpaths)
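
# Minimal usage sketch (added; illustration only — assumes transformers with bs4 installed):
#
#     from transformers import MarkupLMFeatureExtractor
#     fe = MarkupLMFeatureExtractor()
#     enc = fe("<html><body><h1>Hello</h1></body></html>")
#     print(enc.nodes)   # [['Hello']]
#     print(enc.xpaths)  # [['/html/body/h1']]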
| 628
|
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Creates a beta schedule that discretizes the given alpha_t_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
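
# Quick illustration (added, not part of the original module): with the default cosine
# transform every beta is strictly positive and clipped at max_beta = 0.999.
if __name__ == "__main__":
    _demo_betas = betas_for_alpha_bar(10)
    assert (_demo_betas > 0).all() and (_demo_betas <= 0.999).all()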
class __magic_name__ ( snake_case , snake_case ):
UpperCamelCase_ :str = [e.name for e in KarrasDiffusionSchedulers]
UpperCamelCase_ :Tuple = 2
@register_to_config
def __init__(
    self,
    num_train_timesteps=1_000,
    beta_start=0.00_085,
    beta_end=0.012,
    beta_schedule="linear",
    trained_betas=None,
    prediction_type="epsilon",
    timestep_spacing="linspace",
    steps_offset=0,
):
    if trained_betas is not None:
        self.betas = torch.tensor(trained_betas, dtype=torch.float32)
    elif beta_schedule == "linear":
        self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
    elif beta_schedule == "scaled_linear":
        # this schedule is very specific to the latent diffusion model.
        self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
    elif beta_schedule == "squaredcos_cap_v2":
        # Glide cosine schedule
        self.betas = betas_for_alpha_bar(num_train_timesteps)
    else:
        raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

    self.alphas = 1.0 - self.betas
    self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

    # set all values
    self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
def UpperCAmelCase_ ( self , _lowercase , _lowercase=None )-> Union[str, Any]:
if schedule_timesteps is None:
UpperCamelCase_ = self.timesteps
UpperCamelCase_ = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
UpperCamelCase_ = 1 if len(_lowercase ) > 1 else 0
else:
UpperCamelCase_ = timestep.cpu().item() if torch.is_tensor(_lowercase ) else timestep
UpperCamelCase_ = self._index_counter[timestep_int]
return indices[pos].item()
@property
def UpperCAmelCase_ ( self )-> Tuple:
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def UpperCAmelCase_ ( self , _lowercase , _lowercase , )-> torch.FloatTensor:
UpperCamelCase_ = self.index_for_timestep(_lowercase )
if self.state_in_first_order:
UpperCamelCase_ = self.sigmas[step_index]
else:
UpperCamelCase_ = self.sigmas_interpol[step_index]
UpperCamelCase_ = sample / ((sigma**2 + 1) ** 0.5)
return sample
def UpperCAmelCase_ ( self , _lowercase , _lowercase = None , _lowercase = None , )-> Tuple:
UpperCamelCase_ = num_inference_steps
UpperCamelCase_ = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
UpperCamelCase_ = np.linspace(0 , num_train_timesteps - 1 , _lowercase , dtype=_lowercase )[::-1].copy()
elif self.config.timestep_spacing == "leading":
UpperCamelCase_ = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
UpperCamelCase_ = (np.arange(0 , _lowercase ) * step_ratio).round()[::-1].copy().astype(_lowercase )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
UpperCamelCase_ = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
UpperCamelCase_ = (np.arange(_lowercase , 0 , -step_ratio )).round().copy().astype(_lowercase )
timesteps -= 1
else:
raise ValueError(
F"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." )
UpperCamelCase_ = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
UpperCamelCase_ = torch.from_numpy(np.log(_lowercase ) ).to(_lowercase )
UpperCamelCase_ = np.interp(_lowercase , np.arange(0 , len(_lowercase ) ) , _lowercase )
UpperCamelCase_ = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
UpperCamelCase_ = torch.from_numpy(_lowercase ).to(device=_lowercase )
# interpolate sigmas
UpperCamelCase_ = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
UpperCamelCase_ = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
UpperCamelCase_ = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(_lowercase ).startswith("mps" ):
# mps does not support float64
UpperCamelCase_ = torch.from_numpy(_lowercase ).to(_lowercase , dtype=torch.floataa )
else:
UpperCamelCase_ = torch.from_numpy(_lowercase ).to(_lowercase )
# interpolate timesteps
UpperCamelCase_ = self.sigma_to_t(_lowercase ).to(_lowercase , dtype=timesteps.dtype )
UpperCamelCase_ = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
UpperCamelCase_ = torch.cat([timesteps[:1], interleaved_timesteps] )
UpperCamelCase_ = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
UpperCamelCase_ = defaultdict(_lowercase )
def UpperCAmelCase_ ( self , _lowercase )-> Any:
# get log sigma
UpperCamelCase_ = sigma.log()
# get distribution
UpperCamelCase_ = log_sigma - self.log_sigmas[:, None]
# get sigmas range
UpperCamelCase_ = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
UpperCamelCase_ = low_idx + 1
UpperCamelCase_ = self.log_sigmas[low_idx]
UpperCamelCase_ = self.log_sigmas[high_idx]
# interpolate sigmas
UpperCamelCase_ = (low - log_sigma) / (low - high)
UpperCamelCase_ = w.clamp(0 , 1 )
# transform interpolation to time range
UpperCamelCase_ = (1 - w) * low_idx + w * high_idx
UpperCamelCase_ = t.view(sigma.shape )
return t
@property
def UpperCAmelCase_ ( self )-> Any:
return self.sample is None
def UpperCAmelCase_ ( self , _lowercase , _lowercase , _lowercase , _lowercase = True , )-> Union[SchedulerOutput, Tuple]:
UpperCamelCase_ = self.index_for_timestep(_lowercase )
# advance index counter by 1
UpperCamelCase_ = timestep.cpu().item() if torch.is_tensor(_lowercase ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
UpperCamelCase_ = self.sigmas[step_index]
UpperCamelCase_ = self.sigmas_interpol[step_index + 1]
UpperCamelCase_ = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
UpperCamelCase_ = self.sigmas[step_index - 1]
UpperCamelCase_ = self.sigmas_interpol[step_index]
UpperCamelCase_ = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
UpperCamelCase_ = 0
UpperCamelCase_ = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
UpperCamelCase_ = sigma_hat if self.state_in_first_order else sigma_interpol
UpperCamelCase_ = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
UpperCamelCase_ = sigma_hat if self.state_in_first_order else sigma_interpol
UpperCamelCase_ = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("prediction_type not implemented yet: sample" )
else:
raise ValueError(
F"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`" )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
UpperCamelCase_ = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
UpperCamelCase_ = sigma_interpol - sigma_hat
# store for 2nd order step
UpperCamelCase_ = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
UpperCamelCase_ = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
UpperCamelCase_ = sigma_next - sigma_hat
UpperCamelCase_ = self.sample
UpperCamelCase_ = None
UpperCamelCase_ = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_lowercase )
def UpperCAmelCase_ ( self , _lowercase , _lowercase , _lowercase , )-> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
UpperCamelCase_ = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(_lowercase ):
# mps does not support float64
UpperCamelCase_ = self.timesteps.to(original_samples.device , dtype=torch.floataa )
UpperCamelCase_ = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
UpperCamelCase_ = self.timesteps.to(original_samples.device )
UpperCamelCase_ = timesteps.to(original_samples.device )
UpperCamelCase_ = [self.index_for_timestep(_lowercase , _lowercase ) for t in timesteps]
UpperCamelCase_ = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
UpperCamelCase_ = sigma.unsqueeze(-1 )
UpperCamelCase_ = original_samples + noise * sigma
return noisy_samples
def __len__( self )-> Dict:
return self.config.num_train_timesteps
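
# Note (added): this scheduler alternates between two phases — `state_in_first_order` is
# True whenever `self.sample` is None. The first call of each pair stores the sample and
# takes an Euler half-step to the interpolated sigma; the second call completes the
# DPM-Solver-2 update. `step(...)` must therefore be called exactly once per timestep in
# the interleaved `self.timesteps` sequence built by `set_timesteps`.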
| 628
| 1
|
import numpy as np
def sigmoid(vector):
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector):
    return vector * sigmoid(1.702 * vector)
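
# Illustrative checks (added): sigmoid(0) = 0.5, and SiLU(0) = 0 * sigmoid(0) = 0,
# following directly from the two formulas above.
assert float(sigmoid(np.array(0.0))) == 0.5
assert float(sigmoid_linear_unit(np.array(0.0))) == 0.0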
if __name__ == "__main__":
import doctest
doctest.testmod()
| 597
|
def is_sum_subset(arr, required_sum):
    # dynamic programming: subset[i][j] is True when some subset of the first i
    # elements of arr sums to j
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]
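
# Worked example (added; function name as restored above): in {3, 34, 4, 12, 5, 2},
# 4 + 5 reaches 9, but no subset reaches 30.
assert is_sum_subset([3, 34, 4, 12, 5, 2], 9)
assert not is_sum_subset([3, 34, 4, 12, 5, 2], 30)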
if __name__ == "__main__":
import doctest
doctest.testmod()
| 597
| 1
|
"""simple docstring"""
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
a__ : Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    """Video classification pipeline using any `AutoModelForVideoClassification`."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "decord")
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames

        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, videos, **kwargs):
        return super().__call__(videos, **kwargs)

    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        if num_frames is None:
            num_frames = self.model.config.num_frames

        if video.startswith("http://") or video.startswith("https://"):
            video = BytesIO(requests.get(video).content)

        videoreader = VideoReader(video)
        videoreader.seek(0)

        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)

        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)

        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
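
# Usage sketch (added; the checkpoint name is illustrative — any video-classification
# model on the Hub works):
#
#     from transformers import pipeline
#     classifier = pipeline("video-classification", model="MCG-NJU/videomae-base-finetuned-kinetics")
#     classifier("https://example.com/clip.mp4", top_k=3)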
| 589
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
UpperCAmelCase__ = None
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCAmelCase__ = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'''
),
},
}
UpperCAmelCase__ = {
'''facebook/nllb-large-en-ro''': 1024,
'''facebook/nllb-200-distilled-600M''': 1024,
}
# fmt: off
UpperCAmelCase__ = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class snake_case_ ( __UpperCamelCase ):
"""simple docstring"""
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = ["""input_ids""", """attention_mask"""]
snake_case__ = NllbTokenizer
snake_case__ = []
snake_case__ = []
def __init__(self: Optional[Any] , __UpperCAmelCase: Union[str, Any]=None , __UpperCAmelCase: List[Any]=None , __UpperCAmelCase: Union[str, Any]="<s>" , __UpperCAmelCase: Tuple="</s>" , __UpperCAmelCase: Optional[Any]="</s>" , __UpperCAmelCase: Tuple="<s>" , __UpperCAmelCase: Optional[Any]="<unk>" , __UpperCAmelCase: Dict="<pad>" , __UpperCAmelCase: Any="<mask>" , __UpperCAmelCase: Dict=None , __UpperCAmelCase: Optional[Any]=None , __UpperCAmelCase: List[str]=None , __UpperCAmelCase: List[str]=False , **__UpperCAmelCase: Tuple , ) -> Tuple:
'''simple docstring'''
__a : Dict = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
__a : List[str] = legacy_behaviour
super().__init__(
vocab_file=__UpperCAmelCase , tokenizer_file=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , src_lang=__UpperCAmelCase , tgt_lang=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , legacy_behaviour=__UpperCAmelCase , **__UpperCAmelCase , )
__a : Union[str, Any] = vocab_file
__a : str = False if not self.vocab_file else True
__a : Dict = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} )
__a : Optional[Any] = {
lang_code: self.convert_tokens_to_ids(__UpperCAmelCase ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
__a : Dict = src_lang if src_lang is not None else "eng_Latn"
__a : str = self.convert_tokens_to_ids(self._src_lang )
__a : Any = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def UpperCAmelCase__ (self: List[str] ) -> str:
'''simple docstring'''
return self._src_lang
@src_lang.setter
def UpperCAmelCase__ (self: List[str] , __UpperCAmelCase: str ) -> None:
'''simple docstring'''
__a : Dict = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def UpperCAmelCase__ (self: Any , __UpperCAmelCase: List[int] , __UpperCAmelCase: Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def UpperCAmelCase__ (self: List[str] , __UpperCAmelCase: List[int] , __UpperCAmelCase: Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
__a : int = [self.sep_token_id]
__a : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCAmelCase__ (self: str , __UpperCAmelCase: Dict , __UpperCAmelCase: str , __UpperCAmelCase: Optional[str] , __UpperCAmelCase: Optional[str] , **__UpperCAmelCase: Optional[int] ) -> int:
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
__a : Tuple = src_lang
__a : Optional[Any] = self(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
__a : List[Any] = self.convert_tokens_to_ids(__UpperCAmelCase )
__a : List[Any] = tgt_lang_id
return inputs
def UpperCAmelCase__ (self: Optional[int] , __UpperCAmelCase: List[str] , __UpperCAmelCase: str = "eng_Latn" , __UpperCAmelCase: Optional[List[str]] = None , __UpperCAmelCase: str = "fra_Latn" , **__UpperCAmelCase: Dict , ) -> BatchEncoding:
'''simple docstring'''
__a : Optional[int] = src_lang
__a : List[str] = tgt_lang
return super().prepare_seqaseq_batch(__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase__ (self: List[Any] ) -> int:
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def UpperCAmelCase__ (self: Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def UpperCAmelCase__ (self: Optional[int] , __UpperCAmelCase: Union[str, Any] ) -> None:
'''simple docstring'''
__a : Optional[Any] = self.convert_tokens_to_ids(__UpperCAmelCase )
if self.legacy_behaviour:
__a : Dict = []
__a : str = [self.eos_token_id, self.cur_lang_code]
else:
__a : Optional[Any] = [self.cur_lang_code]
__a : Optional[int] = [self.eos_token_id]
__a : Dict = self.convert_ids_to_tokens(self.prefix_tokens )
__a : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
__a : int = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def UpperCAmelCase__ (self: str , __UpperCAmelCase: str ) -> None:
'''simple docstring'''
__a : List[Any] = self.convert_tokens_to_ids(__UpperCAmelCase )
if self.legacy_behaviour:
__a : Any = []
__a : Union[str, Any] = [self.eos_token_id, self.cur_lang_code]
else:
__a : Union[str, Any] = [self.cur_lang_code]
__a : List[str] = [self.eos_token_id]
__a : str = self.convert_ids_to_tokens(self.prefix_tokens )
__a : str = self.convert_ids_to_tokens(self.suffix_tokens )
__a : int = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def UpperCAmelCase__ (self: str , __UpperCAmelCase: str , __UpperCAmelCase: Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(__UpperCAmelCase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory.' )
return
__a : Tuple = os.path.join(
__UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ):
copyfile(self.vocab_file , __UpperCAmelCase )
return (out_vocab_file,)
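
# Usage sketch (added; illustration only — class and checkpoint names assume the standard
# transformers NLLB release):
#
#     tok = NllbTokenizerFast.from_pretrained(
#         "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#     )
#     batch = tok("Hello world", return_tensors="pt")
#     # with legacy_behaviour=False the input starts with the eng_Latn code and ends with </s>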
| 351
| 0
|
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """Returns every combination of `word_bank` substrings that builds `target`."""
    word_bank = word_bank or []
    # create a table
    table_size = len(target) + 1

    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])

    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations = [[word, *way] for way in table[i]]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]
if __name__ == "__main__":
print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
print(
all_construct(
"hexagonosaurus",
["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
)
)
| 710
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
A_ = logging.get_logger(__name__)
A_ = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class SCREAMING_SNAKE_CASE_ ( lowercase_ ):
"""simple docstring"""
A__ = "longformer"
    def __init__(
        self,
        attention_window=512, sep_token_id=2, pad_token_id=1, bos_token_id=0, eos_token_id=2,
        vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
        initializer_range=0.02, layer_norm_eps=1e-12, onnx_export=False, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class SCREAMING_SNAKE_CASE_ ( lowercase_ ):
"""simple docstring"""
def __init__(self, config, task="default", patching_specs=None):
    super().__init__(config, task, patching_specs)
    config.onnx_export = True

@property
def inputs(self):
    if self.task == "multiple-choice":
        dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
    else:
        dynamic_axis = {0: "batch", 1: "sequence"}
    return OrderedDict(
        [
            ("input_ids", dynamic_axis),
            ("attention_mask", dynamic_axis),
            ("global_attention_mask", dynamic_axis),
        ]
    )

@property
def outputs(self):
    outputs = super().outputs
    if self.task == "default":
        outputs["pooler_output"] = {0: "batch"}
    return outputs

@property
def atol_for_validation(self):
    return 1e-4

@property
def default_onnx_opset(self):
    # needs to be >= 14 to support tril operator
    return max(super().default_onnx_opset, 14)

def generate_dummy_inputs(self, preprocessor, batch_size=-1, seq_length=-1, is_pair=False, framework=None):
    inputs = super().generate_dummy_inputs(
        preprocessor=preprocessor, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
    )
    import torch

    # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
    # makes the export fail randomly
    inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
    # make every second token global
    inputs["global_attention_mask"][:, ::2] = 1

    return inputs
| 360
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : Union[str, Any] = logging.get_logger(__name__)
lowercase__ : Dict = {
'facebook/vit-mae-base': 'https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
_snake_case : Optional[Any] = 'vit_mae'
    def __init__(
        self,
        hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
        hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0,
        initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16,
        num_channels=3, qkv_bias=True, decoder_num_attention_heads=16, decoder_hidden_size=512,
        decoder_num_hidden_layers=8, decoder_intermediate_size=2048, mask_ratio=0.75,
        norm_pix_loss=False, **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
| 98
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/config.json""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/config.json""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"""
),
"""distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json""",
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"""
),
"""distilbert-base-uncased-finetuned-sst-2-english""": (
"""https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"""
),
}
class a_ ( snake_case_ ):
'''simple docstring'''
UpperCamelCase = '''distilbert'''
UpperCamelCase = {
'''hidden_size''': '''dim''',
'''num_attention_heads''': '''n_heads''',
'''num_hidden_layers''': '''n_layers''',
}
    def __init__(
        self,
        vocab_size=30522, max_position_embeddings=512, sinusoidal_pos_embds=False, n_layers=6,
        n_heads=12, dim=768, hidden_dim=4 * 768, dropout=0.1, attention_dropout=0.1,
        activation="gelu", initializer_range=0.02, qa_dropout=0.1, seq_classif_dropout=0.2,
        pad_token_id=0, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)
class a_ ( snake_case_ ):
'''simple docstring'''
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
    if self.task == "multiple-choice":
        dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
    else:
        dynamic_axis = {0: "batch", 1: "sequence"}
    return OrderedDict(
        [
            ("input_ids", dynamic_axis),
            ("attention_mask", dynamic_axis),
        ]
    )
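
# Note (added): the `attribute_map` defined above aliases BERT-style names onto
# DistilBERT's own — e.g. `config.hidden_size` transparently reads and writes
# `config.dim` — so generic code written against `hidden_size`/`num_hidden_layers`
# works unchanged with this config.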
| 314
| 0
|
"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
)
parser.add_argument(
'''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
)
parser.add_argument(
'''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
)
parser.add_argument('''--vocab_size''', default=3_0_5_2_2, type=int)
args = parser.parse_args()

logger.info(f"Loading data from {args.data_file}")
with open(args.data_file, "rb") as fp:
    data = pickle.load(fp)

logger.info("Counting occurrences for MLM.")
counter = Counter()
for tk_ids in data:
    counter.update(tk_ids)

counts = [0] * args.vocab_size
for k, v in counter.items():
    counts[k] = v

logger.info(f"Dump to {args.token_counts_dump}")
with open(args.token_counts_dump, "wb") as handle:
    pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
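
# Example invocation (added; script name is hypothetical, default paths reuse the ones above):
#
#   python token_counts.py \
#       --data_file data/dump.bert-base-uncased.pickle \
#       --token_counts_dump data/token_counts.bert-base-uncased.pickle \
#       --vocab_size 30522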
| 715
|
"""simple docstring"""
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str=13 , UpperCAmelCase_ : Dict=7 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : Optional[Any]=99 , UpperCAmelCase_ : int=64 , UpperCAmelCase_ : Union[str, Any]=5 , UpperCAmelCase_ : Union[str, Any]=4 , UpperCAmelCase_ : List[Any]=64 , UpperCAmelCase_ : List[str]="gelu" , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Tuple=512 , UpperCAmelCase_ : Optional[Any]=16 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : int=0.02 , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : int=4 , UpperCAmelCase_ : Tuple=None , ) -> Optional[int]:
"""simple docstring"""
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = seq_length
_lowerCAmelCase = is_training
_lowerCAmelCase = use_input_mask
_lowerCAmelCase = use_token_type_ids
_lowerCAmelCase = use_labels
_lowerCAmelCase = vocab_size
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_act
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = type_vocab_size
_lowerCAmelCase = type_sequence_label_size
_lowerCAmelCase = initializer_range
_lowerCAmelCase = num_labels
_lowerCAmelCase = num_choices
_lowerCAmelCase = scope
def __lowerCamelCase ( self : List[str] ) -> Any:
"""simple docstring"""
return MPNetConfig.from_pretrained('microsoft/mpnet-base' )
def __lowerCamelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase = None
if self.use_input_mask:
_lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
if self.use_labels:
_lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_lowerCAmelCase = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self : Any ) -> Tuple:
"""simple docstring"""
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def __lowerCamelCase ( self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
_lowerCAmelCase = MPNetModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_lowerCAmelCase = model(UpperCAmelCase_ , UpperCAmelCase_ )
_lowerCAmelCase = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __lowerCamelCase ( self : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : int ) -> Optional[Any]:
"""simple docstring"""
_lowerCAmelCase = MPNetForQuestionAnswering(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_lowerCAmelCase = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCamelCase ( self : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any] ) -> Any:
"""simple docstring"""
_lowerCAmelCase = self.num_labels
_lowerCAmelCase = MPNetForSequenceClassification(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_lowerCAmelCase = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCamelCase ( self : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] ) -> Dict:
"""simple docstring"""
_lowerCAmelCase = self.num_choices
_lowerCAmelCase = MPNetForMultipleChoice(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_lowerCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCamelCase ( self : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : List[str] ) -> Union[str, Any]:
"""simple docstring"""
_lowerCAmelCase = self.num_labels
_lowerCAmelCase = MPNetForTokenClassification(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_lowerCAmelCase = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def prepare_config_and_inputs_for_common(self):
    config_and_inputs = self.prepare_config_and_inputs()
    (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
    inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
    return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_: int = (
{
"feature-extraction": MPNetModel,
"fill-mask": MPNetForMaskedLM,
"question-answering": MPNetForQuestionAnswering,
"text-classification": MPNetForSequenceClassification,
"token-classification": MPNetForTokenClassification,
"zero-shot": MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_: Optional[Any] = False
SCREAMING_SNAKE_CASE_: Dict = True
def __lowerCamelCase ( self : str ) -> List[Any]:
"""simple docstring"""
_lowerCAmelCase = MPNetModelTester(self )
_lowerCAmelCase = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37 )
def __lowerCamelCase ( self : Dict ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def __lowerCamelCase ( self : Tuple ) -> str:
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*UpperCAmelCase_ )
def __lowerCamelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*UpperCAmelCase_ )
def __lowerCamelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*UpperCAmelCase_ )
def __lowerCamelCase ( self : Any ) -> List[str]:
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*UpperCAmelCase_ )
def __lowerCamelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*UpperCAmelCase_ )
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@slow
def __lowerCamelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
_lowerCAmelCase = MPNetModel.from_pretrained('microsoft/mpnet-base' )
_lowerCAmelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
_lowerCAmelCase = model(UpperCAmelCase_ )[0]
_lowerCAmelCase = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , UpperCAmelCase_ )
_lowerCAmelCase = torch.tensor(
[[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]] )
# compare the actual values for a slice.
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=1E-4 ) )
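
# To exercise the classes above from a transformers checkout, something like
# the following is the usual route; the file path is an assumption about the
# repository layout, and RUN_SLOW=1 un-skips the @slow integration test:
#
#   RUN_SLOW=1 python -m pytest tests/models/mpnet/test_modeling_mpnet.py -v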
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase : Any = {
'''configuration_blip_2''': [
'''BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Blip2Config''',
'''Blip2QFormerConfig''',
'''Blip2VisionConfig''',
],
'''processing_blip_2''': ['''Blip2Processor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : str = [
'''BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Blip2Model''',
'''Blip2QFormerModel''',
'''Blip2PreTrainedModel''',
'''Blip2ForConditionalGeneration''',
'''Blip2VisionModel''',
]
if TYPE_CHECKING:
from .configuration_blip_a import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlipaConfig,
BlipaQFormerConfig,
BlipaVisionConfig,
)
from .processing_blip_a import BlipaProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_a import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipaForConditionalGeneration,
BlipaModel,
BlipaPreTrainedModel,
BlipaQFormerModel,
BlipaVisionModel,
)
else:
import sys
__UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
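
# A minimal sketch of what the lazy structure above buys: importing the package
# is cheap, and the heavy submodule loads only on first attribute access. The
# dotted path assumes the usual transformers layout for this file.
import transformers.models.blip_2 as blip_2

config_cls = blip_2.Blip2Config  # this attribute access triggers the real import
print(config_cls.__name__)       # Blip2Config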
import math


def jump_search(arr, x):
    """Search sorted list `arr` for `x` by jumping ahead in sqrt(n)-sized
    blocks, then scanning linearly inside the candidate block.
    Returns the index of `x`, or -1 if it is not present."""
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    # Jump block by block until the block's last element reaches x.
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1
    # Linear scan within the candidate block.
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    x = int(input("Enter the number to be searched:\n"))
    res = jump_search(arr, x)
    if res == -1:
        print("Number not found!")
    else:
        print(f"Number {x} is at index {res}")
import re
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin


class DonutProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        """Convert a generated token sequence into an ordered JSON-like dict."""
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}

        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(
                        tokens[6:], is_inner_value=is_inner_value, added_vocab=added_vocab
                    )

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
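
# A minimal sketch of token2json on a hand-made sequence; the tag names below
# are hypothetical, not from a real Donut vocabulary, and the checkpoint used
# to build the processor is illustrative (requires network or cache access).
from transformers import DonutProcessor

processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base")
sequence = "<s_menu><s_name>Latte</s_name><s_price>4.50</s_price></s_menu>"
print(processor.token2json(sequence))
# {'menu': {'name': 'Latte', 'price': '4.50'}}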
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch

import math
from typing import Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin


class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
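
# A minimal sketch of the intended call order for the scheduler above; the
# stand-in "score" below is a placeholder so the loop actually runs, not a
# trained score network, and the shapes are illustrative.
scheduler = ScoreSdeVpScheduler()
scheduler.set_timesteps(num_inference_steps=10)

x = torch.randn(1, 3, 32, 32)
for t in scheduler.timesteps:
    score = -x  # placeholder for score_model(x, t)
    x, x_mean = scheduler.step_pred(score, x, t)
print(x_mean.shape)  # torch.Size([1, 3, 32, 32])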
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union

# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
    CONFIG_MAPPING_NAMES,
    AutoConfig,
    model_type_to_module_name,
    replace_list_option_in_docstrings,
)


logger = logging.get_logger(__name__)

IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
    [
        ("align", "EfficientNetImageProcessor"),
        ("beit", "BeitImageProcessor"),
        ("bit", "BitImageProcessor"),
        ("blip", "BlipImageProcessor"),
        ("blip-2", "BlipImageProcessor"),
        ("bridgetower", "BridgeTowerImageProcessor"),
        ("chinese_clip", "ChineseCLIPImageProcessor"),
        ("clip", "CLIPImageProcessor"),
        ("clipseg", "ViTImageProcessor"),
        ("conditional_detr", "ConditionalDetrImageProcessor"),
        ("convnext", "ConvNextImageProcessor"),
        ("convnextv2", "ConvNextImageProcessor"),
        ("cvt", "ConvNextImageProcessor"),
        ("data2vec-vision", "BeitImageProcessor"),
        ("deformable_detr", "DeformableDetrImageProcessor"),
        ("deit", "DeiTImageProcessor"),
        ("deta", "DetaImageProcessor"),
        ("detr", "DetrImageProcessor"),
        ("dinat", "ViTImageProcessor"),
        ("donut-swin", "DonutImageProcessor"),
        ("dpt", "DPTImageProcessor"),
        ("efficientformer", "EfficientFormerImageProcessor"),
        ("efficientnet", "EfficientNetImageProcessor"),
        ("flava", "FlavaImageProcessor"),
        ("focalnet", "BitImageProcessor"),
        ("git", "CLIPImageProcessor"),
        ("glpn", "GLPNImageProcessor"),
        ("groupvit", "CLIPImageProcessor"),
        ("imagegpt", "ImageGPTImageProcessor"),
        ("instructblip", "BlipImageProcessor"),
        ("layoutlmv2", "LayoutLMv2ImageProcessor"),
        ("layoutlmv3", "LayoutLMv3ImageProcessor"),
        ("levit", "LevitImageProcessor"),
        ("mask2former", "Mask2FormerImageProcessor"),
        ("maskformer", "MaskFormerImageProcessor"),
        ("mgp-str", "ViTImageProcessor"),
        ("mobilenet_v1", "MobileNetV1ImageProcessor"),
        ("mobilenet_v2", "MobileNetV2ImageProcessor"),
        ("mobilevit", "MobileViTImageProcessor"),
        ("mobilevitv2", "MobileViTImageProcessor"),
        ("nat", "ViTImageProcessor"),
        ("oneformer", "OneFormerImageProcessor"),
        ("owlvit", "OwlViTImageProcessor"),
        ("perceiver", "PerceiverImageProcessor"),
        ("pix2struct", "Pix2StructImageProcessor"),
        ("poolformer", "PoolFormerImageProcessor"),
        ("regnet", "ConvNextImageProcessor"),
        ("resnet", "ConvNextImageProcessor"),
        ("sam", "SamImageProcessor"),
        ("segformer", "SegformerImageProcessor"),
        ("swiftformer", "ViTImageProcessor"),
        ("swin", "ViTImageProcessor"),
        ("swin2sr", "Swin2SRImageProcessor"),
        ("swinv2", "ViTImageProcessor"),
        ("table-transformer", "DetrImageProcessor"),
        ("timesformer", "VideoMAEImageProcessor"),
        ("tvlt", "TvltImageProcessor"),
        ("upernet", "SegformerImageProcessor"),
        ("van", "ConvNextImageProcessor"),
        ("videomae", "VideoMAEImageProcessor"),
        ("vilt", "ViltImageProcessor"),
        ("vit", "ViTImageProcessor"),
        ("vit_hybrid", "ViTHybridImageProcessor"),
        ("vit_mae", "ViTImageProcessor"),
        ("vit_msn", "ViTImageProcessor"),
        ("xclip", "CLIPImageProcessor"),
        ("yolos", "YolosImageProcessor"),
    ]
)

IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)


def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
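
# A minimal sketch of resolving a processor class by name with the helper
# above; it returns None for unknown names, and a dummy placeholder class when
# the name is known but its backend dependency is missing.
vit_cls = image_processor_class_from_name("ViTImageProcessor")
print(vit_cls)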
def get_image_processor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    """Load the image processor configuration of a pretrained model as a dict."""
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
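
# A minimal sketch of fetching the raw preprocessor config as a dict; the
# checkpoint name is illustrative and the call needs network or cache access.
config_dict = get_image_processor_config("google/vit-base-patch16-224")
print(config_dict.get("image_processor_type"))  # e.g. "ViTImageProcessor"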
class AutoImageProcessor:
    r"""
    This is a generic image processor class that will be instantiated as one of the image processor classes of the
    library when created with the [`AutoImageProcessor.from_pretrained`] class method. This class cannot be
    instantiated directly using `__init__()` (it throws an error).
    """

    def __init__(self):
        raise EnvironmentError(
            "AutoImageProcessor is designed to be instantiated "
            "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get("image_processor_type", None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("auto_map", {}):
            image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]

        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("feature_extractor_type", None)
            if feature_extractor_class is not None:
                logger.warning(
                    "Could not find image processor class in the image processor config or the model config. Loading"
                    " based on pattern matching with the model's feature extractor configuration."
                )
                image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor")
            if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
                feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
                image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor")
                logger.warning(
                    "Could not find image processor auto map in the image processor config or the model config."
                    " Loading based on pattern matching with the model's feature extractor configuration."
                )

        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type`
            image_processor_class = getattr(config, "image_processor_type", None)
            if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["AutoImageProcessor"]

        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)

        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
            f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, image_processor_class):
        """Register a new image processor class for a given configuration class."""
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
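
# A minimal sketch of the two public entry points above; the checkpoint name
# is illustrative. from_pretrained resolves the concrete processor class from
# the checkpoint's config, so the caller never names it explicitly.
image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
print(type(image_processor).__name__)  # ViTImageProcessor

# Registering a custom pairing (both names hypothetical):
# AutoImageProcessor.register(MyConfig, MyImageProcessor)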
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import PoolFormerImageProcessor


class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
"""simple docstring"""
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
__A = TypeVar('''T''')
class _snake_case ( Generic[T] ):
def __init__( self : Union[str, Any] , UpperCAmelCase : Any = True ):
__lowerCamelCase : List[str] = {} # dictionary of lists
__lowerCamelCase : str = directed
def lowerCamelCase__ ( self : Any , UpperCAmelCase : Any , UpperCAmelCase : int ):
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowerCAmelCase_ )
self.adj_list[destination_vertex].append(lowerCAmelCase_ )
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowerCAmelCase_ )
__lowerCamelCase : List[Any] = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif destination_vertex in self.adj_list:
self.adj_list[destination_vertex].append(lowerCAmelCase_ )
__lowerCamelCase : Any = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as it's first adjacent vertex also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as it's first adjacent vertex.
else:
__lowerCamelCase : Tuple = [destination_vertex]
__lowerCamelCase : List[str] = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowerCAmelCase_ )
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowerCAmelCase_ )
__lowerCamelCase : Any = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
__lowerCamelCase : Tuple = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as it's first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
__lowerCamelCase : Optional[int] = [destination_vertex]
__lowerCamelCase : str = []
return self
def __repr__( self : Optional[Any] ):
return pformat(self.adj_list )
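
# A minimal sketch exercising the class above; add_edge returns self, so calls
# chain. Expected output shown in the trailing comment.
graph = GraphAdjacencyList(directed=False)
graph.add_edge(1, 2).add_edge(2, 3)
print(graph)  # {1: [2], 2: [1, 3], 3: [2]}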
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class _snake_case ( unittest.TestCase ):
def __init__( self : Optional[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[int]=7 , UpperCAmelCase : Any=3 , UpperCAmelCase : str=18 , UpperCAmelCase : str=30 , UpperCAmelCase : Any=400 , UpperCAmelCase : List[Any]=True , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : Dict=None , UpperCAmelCase : List[str]=True , UpperCAmelCase : int=[0.5, 0.5, 0.5] , UpperCAmelCase : Tuple=[0.5, 0.5, 0.5] , ):
__lowerCamelCase : Any = size if size is not None else {"shortest_edge": 18}
__lowerCamelCase : Dict = crop_size if crop_size is not None else {"height": 18, "width": 18}
__lowerCamelCase : List[str] = parent
__lowerCamelCase : int = batch_size
__lowerCamelCase : Any = num_channels
__lowerCamelCase : Tuple = image_size
__lowerCamelCase : int = min_resolution
__lowerCamelCase : List[Any] = max_resolution
__lowerCamelCase : List[str] = do_resize
__lowerCamelCase : str = size
__lowerCamelCase : Tuple = do_center_crop
__lowerCamelCase : Optional[int] = crop_size
__lowerCamelCase : Optional[Any] = do_normalize
__lowerCamelCase : Optional[Any] = image_mean
__lowerCamelCase : List[Any] = image_std
def lowerCamelCase__ ( self : int ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class _snake_case ( a__ , unittest.TestCase ):
snake_case__ = LevitImageProcessor if is_vision_available() else None
def lowerCamelCase__ ( self : Any ):
__lowerCamelCase : Optional[int] = LevitImageProcessingTester(self )
@property
def lowerCamelCase__ ( self : Union[str, Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase__ ( self : List[str] ):
__lowerCamelCase : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase , "image_mean" ) )
self.assertTrue(hasattr(UpperCAmelCase , "image_std" ) )
self.assertTrue(hasattr(UpperCAmelCase , "do_normalize" ) )
self.assertTrue(hasattr(UpperCAmelCase , "do_resize" ) )
self.assertTrue(hasattr(UpperCAmelCase , "do_center_crop" ) )
self.assertTrue(hasattr(UpperCAmelCase , "size" ) )
def lowerCamelCase__ ( self : int ):
__lowerCamelCase : str = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
__lowerCamelCase : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def lowerCamelCase__ ( self : Dict ):
pass
def lowerCamelCase__ ( self : Any ):
# Initialize image_processing
__lowerCamelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCamelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , Image.Image )
# Test not batched input
__lowerCamelCase : Dict = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__lowerCamelCase : int = image_processing(UpperCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowerCamelCase__ ( self : Any ):
# Initialize image_processing
__lowerCamelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCamelCase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , numpify=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , np.ndarray )
# Test not batched input
__lowerCamelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__lowerCamelCase : Dict = image_processing(UpperCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowerCamelCase__ ( self : Union[str, Any] ):
# Initialize image_processing
__lowerCamelCase : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCamelCase : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , torchify=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , torch.Tensor )
# Test not batched input
__lowerCamelCase : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__lowerCamelCase : List[str] = image_processing(UpperCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
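
# A minimal sketch of the processor under test, outside unittest; requires
# Pillow and torch, and the sizes mirror the tester defaults above.
import numpy as np
from PIL import Image
from transformers import LevitImageProcessor

image = Image.fromarray((np.random.rand(40, 60, 3) * 255).astype(np.uint8))
processor = LevitImageProcessor(size={"shortest_edge": 18}, crop_size={"height": 18, "width": 18})
pixel_values = processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, 18, 18])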